Compare commits


283 Commits

Author SHA1 Message Date
vazmin 4adf6b3227 gh actions: upgrade to 1.2.11-1 2025-11-23 10:48:22 +00:00
vazmin f4a799402e gh actions: upgrade to 1.2.11-1 2025-11-23 10:00:56 +00:00
vazmin 27510e9641 gh actions: upgrade to 1.2.11-1 2025-11-23 09:06:43 +00:00
YuQing 848077797b upgrade version to 1.2.11 2025-11-16 17:01:06 +08:00
YuQing d22f9da49c bugfixed: MUST call sf_hold_task in sf_nio_notify for rare case 2025-11-16 15:29:38 +08:00
YuQing 5495455fa7 do NOT call task->finish_callback 2025-11-15 11:14:14 +08:00
YuQing 4da0ff251c upgrade version to 1.2.10 2025-11-11 09:57:18 +08:00
YuQing 2444eac6ce declare use_send_zc field anyway 2025-11-11 09:52:42 +08:00
YuQing a52cc2d5d4 check sf_context->use_io_uring more 2025-11-05 09:58:37 +08:00
YuQing c4af33a497 declare use_io_uring correctly 2025-11-04 15:55:33 +08:00
YuQing fa32972052 move use_io_uring and use_send_zc to struct sf_context 2025-11-04 15:40:00 +08:00
YuQing 688211fbcd correct compile error 2025-11-03 15:22:45 +08:00
YuQing 1b2f521b99 uring cancel callback release task correctly 2025-11-03 14:56:29 +08:00
YuQing ddc528d69d restore function sf_client_sock_in_read_stage 2025-11-02 15:02:54 +08:00
YuQing 32d443b497 MUST call set_read_event anyway after socket send done 2025-10-27 12:28:24 +08:00
YuQing 53dd39500f upgrade version to 1.2.9 2025-10-26 12:27:49 +08:00
YuQing 772a9a6895 Merge remote-tracking branch 'origin/use_iouring' 2025-10-26 12:26:53 +08:00
YuQing 932751d392 send zc done notify callback for recycling buffer 2025-10-20 10:34:47 +08:00
YuQing 817ff547da set alloc_conn_once and max_entries gracefully 2025-10-12 12:28:11 +08:00
YuQing 926cd40114 ioevent_init: set max entries for io_uring gracefully 2025-10-12 10:24:35 +08:00
YuQing b688973cf9 change use_send_zc's default value to true 2025-10-09 14:01:32 +08:00
YuQing b16526e8f7 bug fixed: check_task process correctly 2025-10-06 20:55:29 +08:00
YuQing 68079fc468 IOEventCallback: change event type from short to int 2025-10-05 16:53:21 +08:00
YuQing 3dcc1c570d call sf_proto_init_task_magic when task->shrinked 2025-10-03 21:06:58 +08:00
YuQing cf0950ea62 sf_set_read_event just skipped when use_io_uring is true 2025-10-03 11:33:26 +08:00
YuQing 263171c4fe async_connect use io_uring 2025-09-30 11:26:11 +08:00
YuQing a2ab8a0c01 adapt Linux io_uring OK 2025-09-27 15:41:56 +08:00
YuQing 0f75b039f6 sf_logger_set_schedule_entry change to sf_logger_set_schedule_entries 2025-09-26 19:57:03 +08:00
YuQing ecee21f289 socket send and recv adapt to io_uring 2025-09-25 15:54:38 +08:00
YuQing bc5af8a58b struct sf_network_handler add field use_iouring 2025-09-24 15:59:27 +08:00
YuQing f0ee6ce73f struct sf_context remove field: remove_from_ready_list 2025-09-21 15:08:08 +08:00
vazmin aef9d803d1 gh actions: upgrade to 1.2.8-1 2025-08-16 16:32:03 +00:00
YuQing 6d0465acc4 upgrade version to 1.2.8 2025-08-14 09:44:39 +08:00
YuQing 2e5258045d change SF_BINLOG_FILE_PREFIX to SF_BINLOG_FILE_PREFIX_STR 2025-08-10 12:03:25 +08:00
YuQing fc689a02ba rename fc_combine_two_string to fc_combine_two_strings 2025-08-09 15:22:04 +08:00
YuQing 7b3f6d620a use fc_safe_strcpy and fc_combine_two_string 2025-08-08 21:52:08 +08:00
YuQing 03f64998ce performance opt.: replace snprintf 2025-08-07 19:53:38 +08:00
YuQing 0b4936bd8f performance opt.: replace sprintf again 2025-08-05 18:05:56 +08:00
YuQing ba87f8e1ea performance opt.: replace sprintf 2025-08-04 16:57:35 +08:00
YuQing da2ddc7441 sf_log_config for client correctly 2025-04-23 15:17:23 +08:00
vazmin b83189f021 gh actions: upgrade to 1.2.7-1 2025-04-06 16:56:40 +00:00
YuQing 4ad53f7ee3 correct libserverframe release version 2025-04-01 17:21:01 +08:00
YuQing 003cc74b96 upgrade version to 1.2.7 2025-04-01 16:49:38 +08:00
YuQing 3815c0b2ce export function sf_file_writer_direct_write 2025-03-15 10:10:01 +08:00
YuQing 7ee7e7b535 add function sf_file_writer_get_last_line 2025-03-10 17:01:03 +08:00
YuQing e095ce45c2 add function sf_file_writer_flush_buffer 2025-03-05 20:05:56 +08:00
YuQing f9d8c3d070 add function write_to_binlog_index_file 2025-02-18 15:49:00 +08:00
YuQing fc9c23fb95 call flush_writer_files before rotate file 2025-02-17 10:54:07 +08:00
YuQing 755296bb68 add function sf_proto_send_buf2 2025-02-12 21:59:04 +08:00
YuQing 14a783fd6f sf_binlog_writer_rotate_file can skip empty file 2025-02-10 18:34:04 +08:00
YuQing e9e33883bf Merge branch 'master' of gitee.com:fastdfs100/libserverframe 2025-02-09 09:30:42 +08:00
YuQing 318640572f sf_file_writer.[hc]: support write done callback 2025-02-09 09:26:34 +08:00
YuQing f6e2de5668 upgrade version to 1.2.6 2025-01-27 20:50:18 +08:00
YuQing b6e24d0548 sf_connection_manager.[hc]: get connection functions add parameter shared 2025-01-27 10:59:57 +08:00
YuQing 03974ff302 explicit cast for fast_mblock_alloc_object 2024-12-08 09:29:16 +08:00
YuQing a3e1956128 change macro sf_log_network_error_for_update_ex 2024-10-29 09:52:45 +08:00
YuQing 75e8aacfd9 sf_binlog_writer.[hc] add parameter write_interval_ms for high performance 2024-10-07 09:21:19 +08:00
vazmin be4bad4ee1 gh actions: upgrade to 1.2.5-1 2024-09-29 15:24:39 +00:00
YuQing 3993b79a81 sf_connection_manager_init: set htable_capacity gracefully 2024-09-22 12:21:06 +08:00
YuQing 42c6e2c6b7 struct sf_context add field is_client for RDMA callback alloc_pd 2024-09-20 14:16:59 +08:00
YuQing 6a60a49c32 upgrade version to 1.2.5 2024-09-17 11:24:01 +08:00
YuQing 14d0a1c014 task init callback support extra argument 2024-09-15 12:06:25 +08:00
vazmin a01ccf66dc gh actions: upgrade to 1.2.4-1 2024-06-15 14:45:44 +00:00
YuQing 63d43fc9cc upgrade version to 1.2.4 2024-06-11 09:39:08 +08:00
YuQing e83be7356d change default values of connect timeout and network timeout 2024-04-28 16:20:59 +08:00
vazmin 3dfdb8ace6 gh actions: upgrade to 1.2.3-1 2024-03-17 15:11:04 +00:00
YuQing 35d9be16ee upgrade version to V1.2.3 2024-03-11 11:18:42 +08:00
YuQing 231e2610e5 log square quoted IPv6 address 2024-03-05 18:07:34 +08:00
YuQing 1c796ab819 sf_file_writer_init add parameter: max_record_size 2024-02-29 11:58:05 +08:00
YuQing 7f6ad6dcba sf_load_context_from_config_ex support max_pkg_size_min_value 2024-02-26 16:17:41 +08:00
YuQing 78d65ba2c6 net buffer config for each sf_context instance 2024-02-20 09:53:52 +08:00
YuQing d5a9f40a66 correct macros for struct SFBinlogBuffer 2024-02-15 15:13:40 +08:00
YuQing 9d3a92d7da fixed macro SF_BINLOG_BUFFER_LENGTH 2024-02-05 17:05:39 +08:00
YuQing cab9ce4c4f set flags of protocol header 2024-02-04 09:22:56 +08:00
vazmin 8ea4e28f73 gh actions: upgrade to 1.2.2-1 2024-01-31 12:00:10 +00:00
YuQing 930784191e upgrade version to 1.2.2 2024-01-30 10:49:58 +08:00
YuQing e20a2c04c2 set header flags in sf_proto_init_task_context 2024-01-29 11:08:16 +08:00
vazmin 782697414d gh actions: upgrade to 1.2.1-1 2024-01-01 11:24:45 +00:00
YuQing c861b1cf67 upgrade version to 1.2.1 2023-12-08 15:23:56 +08:00
YuQing c2e7b6e711 add function sf_set_address_family_by_ip 2023-12-05 08:16:54 +08:00
YuQing a969a0df07 support address family IPv4, IPv6 and both 2023-11-29 18:52:02 +08:00
YuQing 5618afabbb code adjust for pull request #6 2023-11-23 16:16:00 +08:00
YuQing 0e1fcdafce
Merge pull request #6 from sunqiangwei1988/master
Added: IPv6 support
2023-11-23 15:58:43 +08:00
YuQing 951d010512
Merge branch 'master' into master 2023-11-23 15:58:26 +08:00
vazmin 9b6c64e346 gh actions: upgrade to 1.2.0-3 2023-11-21 14:36:16 +00:00
YuQing 413f6eef76 add function sf_global_init 2023-11-21 15:54:24 +08:00
vazmin 153905dc44 gh actions: upgrade to 1.2.0-2 2023-11-20 13:24:02 +00:00
vazmin 960e84e2b5 gh actions: upgrade to 1.2.0-1 2023-11-19 14:46:16 +00:00
YuQing a9f4447686 log more info for sf_proto_set_body_length 2023-11-16 10:42:24 +08:00
YuQing cf9088fb0c use task->pending_send_count to prevent re-entry 2023-11-06 10:54:50 +08:00
sunqiangwei1988 c619a5336d Added: IPv6 support
1. Add methods to detect whether the host has an IPv4 address and/or an IPv6 address configured.
2. Modify the _socket_server method in sf_service.c to support both IPv4 and IPv6 addresses; when the server is dual-stack, prefer the IPv4 address.
2023-11-02 10:27:22 +08:00
YuQing a8867a19c4 sf_send_data_callback and sf_send_done_callback changed 2023-10-26 10:48:22 +08:00
YuQing 89a451b8ce call set_read_event in func sf_nio_deal_task 2023-10-25 10:02:47 +08:00
YuQing db00a7add8 set task recv offset and length correctly 2023-10-24 21:22:56 +08:00
YuQing c3f7254838 add inited variable for run_by struct 2023-10-18 17:20:14 +08:00
YuQing 7f6e7b12b4 remove useless variable: length 2023-09-29 15:01:23 +08:00
YuQing 71e7066c81 invoke send done callback correctly 2023-09-29 14:59:58 +08:00
YuQing 5f5db2b998 support explicit post recv for RDMA 2023-09-28 22:20:52 +08:00
YuQing 60d6b49998 rdma callback get_buffer rename to get_recv_buffer 2023-09-27 11:23:50 +08:00
YuQing a0fe474198 add functions: sf_xxxx_task_send/recv_buffer 2023-09-26 15:51:26 +08:00
YuQing 17c99cdd55 sf_nio_task_is_idle rename to sf_nio_task_send_done 2023-09-26 15:26:42 +08:00
YuQing f8e3fcdc55 adapt to the newest struct fast_task_info from libfastcommon 2023-09-25 18:37:53 +08:00
YuQing cd1920872a sf_recv_response_header check recv length for rdma 2023-09-24 14:31:37 +08:00
YuQing fee1e28348 SF_NIO_STAGE_CONNECT stage should call inc_connection_current_count 2023-09-22 18:44:32 +08:00
YuQing be9b71422f nio support callback connect_done for client 2023-09-22 18:27:12 +08:00
YuQing 3a413408ad add func sf_alloc_init_task_ex to specify reffer_count 2023-09-21 09:03:45 +08:00
YuQing 3c7ebd14d9 MUST call fast_timer_remove after ioevent_detach 2023-09-19 09:37:17 +08:00
YuQing 9fad04f3f9 nio threads support busy_polling_callback 2023-09-18 16:19:10 +08:00
YuQing 9731e736df idempotency support RDMA 2023-09-15 10:39:03 +08:00
YuQing 0eb842dc09 sf_nio.c: check_task adapt to RDMA 2023-09-14 09:50:49 +08:00
YuQing 435ae6bb84 remove quotes for macro LOAD_API 2023-09-13 21:23:47 +08:00
YuQing c6d4612862 send and recv data adapt for RDMA 2023-09-12 16:03:22 +08:00
YuQing fca50e6d49 sf_load_config support fixed_buff_size 2023-09-11 11:36:30 +08:00
YuQing 2463725570 use the newest conn_pool_init_ex1 from libfastcommon 2023-09-10 20:55:46 +08:00
YuQing 12637bf181 set rdma handler listen port 2023-09-08 07:58:46 +08:00
YuQing dedc023235 add parameter comm_type when load from config 2023-09-07 09:38:21 +08:00
YuQing b3334d2ad5 add function sf_set_body_length 2023-09-05 16:45:51 +08:00
YuQing e22400fa1c struct fast_task_info support padding_size for RDMA connection 2023-09-05 09:21:45 +08:00
YuQing 5a29dffc50 load RDMA APIs from library 2023-09-05 07:27:15 +08:00
YuQing 2839183433 move type SFNetworkType to libfastcommon as FCNetworkType 2023-09-04 11:01:36 +08:00
YuQing 36e4922440 callbacks impl. for socket 2023-09-03 18:35:31 +08:00
YuQing 96c7bc9a42 function prototype for socket and rdma both 2023-09-03 11:50:50 +08:00
vazmin 58a796e169 gh actions: upgrade to 1.1.29-1 2023-08-06 07:22:46 +00:00
YuQing c21cc936ef upgrade version to 1.1.29 2023-08-06 09:04:32 +08:00
YuQing f9f7b0f159 add function sf_serializer_pack_id_name_skiplist 2023-08-05 20:48:40 +08:00
YuQing e440273f35 sf_binlog_writer.c: flow control more rigorously 2023-07-30 10:11:00 +08:00
YuQing 27a7696867 bugfixed: sf_binlog_index.c call parse only when row_count > 0 2023-07-29 09:48:25 +08:00
YuQing 024a6b0e8a improve robustness of binlog writer flow control 2023-07-27 10:54:59 +08:00
YuQing 6ce1a711f9 add inline function: sf_binlog_writer_get_waiting_count etc. 2023-07-26 10:35:58 +08:00
vazmin 3e3162c825 gh actions: upgrade to 1.1.28-1 2023-07-23 14:28:20 +00:00
YuQing 8fdb8599c9 upgrade version to 1.1.28 2023-07-07 08:26:32 +08:00
YuQing 566c055f27 use libfastcommon V1.68 2023-07-05 18:09:53 +08:00
YuQing 5e8535db9c function sf_push_to_binlog_write_queue changed 2023-06-30 10:40:05 +08:00
YuQing 1abf7402ca log info when flow ctrol waiting time > 0 gracefully 2023-06-30 10:29:18 +08:00
YuQing d006954ceb sf_binlog_writer_init support call_fsync parameter 2023-06-27 18:19:36 +08:00
YuQing 6e071410dc log warning when flow ctrol waiting time > 0 2023-06-27 16:39:37 +08:00
vazmin e34cc12ae5 gh actions: upgrade to 1.1.27-1 2023-06-24 06:51:30 +00:00
YuQing b15faf68f4 upgrade version to 1.1.27 2023-06-17 15:22:06 +08:00
YuQing a95f4cc725 sf_binlog_writer.[hc]: use config max_delay for flow control 2023-06-16 17:59:29 +08:00
YuQing c9fba3b9a7 Merge branch 'master' of github.com:happyfish100/libserverframe 2023-06-10 14:48:29 +08:00
YuQing 2a245a06aa sf_file_writer.[hc] support config call_fsync for performance 2023-06-10 14:32:00 +08:00
vazmin 6f60ff5825 gh actions: upgrade to 1.1.26-1 2023-06-04 10:52:11 +00:00
YuQing d5139804f9 adapt newest fast_mblock_init_ex2 2023-05-19 11:21:15 +08:00
YuQing 0989cc02fe remove debug info in request_metadata.c 2023-05-18 20:20:27 +08:00
YuQing 5786b0383f request_metadata.c: set thread name in Linux 2023-05-18 16:10:51 +08:00
YuQing 3b946778dd upgrade version to 1.1.26 2023-05-16 09:41:33 +08:00
YuQing 32706b6275 add function sf_socket_close 2023-05-10 20:29:14 +08:00
YuQing 7c6673f78a connection manager support exclude server_id for server side 2023-05-05 16:22:04 +08:00
YuQing 90e144920a use new sorted queue with double link chain for quick push 2023-05-04 20:07:23 +08:00
YuQing c5d64a0d54 change field lc_pair to lcp 2023-03-27 16:27:57 +08:00
YuQing 2272bf2707 add macro func: sf_file_writer_get_last_version_silence 2023-03-27 15:32:25 +08:00
YuQing 0328b32766 sf_file_writer_get_last_version support log_level 2023-03-14 09:48:22 +08:00
YuQing 404f374397 remove useless field: tag 2023-03-12 11:29:56 +08:00
YuQing dfc14de25d add type SFBlockSliceKeyInfo for libdiskallocator 2023-03-10 19:11:16 +08:00
YuQing 30ebb55c27 add type SFBlockKey and SFSliceSize for libdiskallocator and faststore 2023-03-05 08:48:36 +08:00
YuQing 077154f75f add macro func sf_log_network_error_for_delete_ex 2023-03-02 11:25:53 +08:00
YuQing 8bedbb6f27 code simplification for last commit 2023-02-23 10:51:46 +08:00
YuQing 92fbcab0f4 bugfixed: fastdfs issue #620
set notify.stage to SF_NIO_STAGE_NONE before deal_notified_task
2023-02-23 10:30:40 +08:00
vazmin ac923ebaf8 gh actions: upgrade to 1.1.25-1 2023-02-18 05:44:50 +00:00
YuQing a9ebe20b5b upgrade version to 1.1.25 2023-02-15 21:04:04 +08:00
YuQing 13990e3747 code simplification for epoll edge trigger 2023-02-12 20:04:01 +08:00
YuQing 294ad5e636 use field notify_next for notify queue of nio thread 2023-02-12 19:47:31 +08:00
YuQing 7f758fd293 init epoll_edge_trigger to false 2023-02-12 12:20:06 +08:00
YuQing c1ae024da5 enable epoll edge trigger by global variable epoll_edge_trigger 2023-02-12 10:38:46 +08:00
vazmin 1dd9ac656f gh actions: upgrade to 1.1.24-1 2023-01-15 13:50:15 +00:00
YuQing 5a8452721d upgrade version to 1.1.24 2023-01-14 08:40:47 +08:00
YuQing 91f0564158 change log level to debug for hash entry reclaim 2023-01-13 14:25:30 +08:00
YuQing 69f117c956 check socket connected on unexpected stage 2022-12-30 17:23:18 +08:00
YuQing ca3f14df6e get_leader_connection: failover on multi ip addresses 2022-12-23 09:35:01 +08:00
YuQing a6c8c65371 log address count when make_connection fail 2022-12-22 16:04:32 +08:00
YuQing 3ccec6eb36 add function sf_load_data_path_config_ex 2022-12-21 15:45:03 +08:00
YuQing f3afc0af6e show patch part of version info anyway 2022-11-25 16:04:13 +08:00
vazmin e54f2d413e gh actions: upgrade to 1.1.22-1 2022-11-21 14:55:50 +00:00
vazmin 777713e0e4 debian: installation dir changes 2022-11-21 22:36:29 +08:00
YuQing a2dc31dc88 upgrade version to 1.1.23 2022-11-21 08:16:56 +08:00
YuQing f262e60259 make.sh set LIB_VERSION to lib for Ubuntu and Debian 2022-11-20 17:00:35 +08:00
YuQing 3578c0f0af Makefile.in: force symlink library 2022-11-13 17:16:39 +08:00
YuQing 5ca1f6dda6 use newest function normalize_path from libfastcommon 2022-11-07 08:30:49 +08:00
YuQing 53fea21135 make.sh auto create symlink for include 2022-11-07 08:30:09 +08:00
YuQing 3191d01e38 convert errnos: ENOLINK, ENOTEMPTY and ELOOP 2022-11-03 11:11:49 +08:00
YuQing f2bfe72a4f requires libfastcommon 1.0.63 2022-10-26 09:58:16 +08:00
YuQing be38181f71 upgrade version to 1.1.22 2022-10-26 09:53:47 +08:00
YuQing 0b89c09371 SFProtoGetServerStatusReq add field: auth_enabled 2022-10-25 09:24:55 +08:00
YuQing 5522165e5c add macro SF_PROTO_CLIENT_SET_REQ_EX 2022-10-24 20:46:53 +08:00
vazmin 97f9db7a17 gh actions: upgrade to 1.1.21-1 2022-10-08 13:28:40 +00:00
YuQing 8e42e9640d upgrade version to 1.1.21 2022-10-08 09:30:01 +08:00
YuQing 3b5d580b36 set last_versions.done after write 2022-09-30 15:37:05 +08:00
YuQing d4676e9d71 sf_binlog_writer.[hc]: support passive write 2022-09-29 11:44:02 +08:00
vazmin 230250d2f3 gh actions: upgrade to 1.1.20-1 2022-09-22 12:22:39 +00:00
YuQing 16be02e8fd upgrade version to 1.1.20 2022-09-22 09:14:21 +08:00
YuQing 4a30dfe844 add macro: sf_log_network_error_for_update_ex 2022-09-22 08:40:03 +08:00
YuQing 4f3cde053c auto create base_path when it not exist 2022-09-21 11:37:51 +08:00
YuQing 45531cf0c8 output refine for receipt_recv_timeout_callback 2022-09-20 20:39:55 +08:00
YuQing 99078203c0 upgrade version to V1.1.19 2022-09-15 10:41:32 +08:00
YuQing 61d2762411 Merge branch 'master' of github.com:happyfish100/libserverframe 2022-09-14 10:41:12 +08:00
YuQing b7b346ea7f sf_load_global_config_ex add params: max_pkg_size_item_name and need_set_run_by 2022-09-14 10:40:44 +08:00
YuQing cf4856e04b support send_done_callback for FastDFS 2022-09-14 10:38:38 +08:00
vazmin e64bf7f15e gh actions: upgrade to 1.1.18-1 2022-09-07 13:36:38 +00:00
YuQing 78337ec4a3 upgrade version to 1.1.18 2022-09-04 13:51:29 +08:00
YuQing 5ee8ce8fe7 correct macro function sf_service_init 2022-09-03 10:03:09 +08:00
YuQing a0f16319e0 struct sf_shared_mbuffer: Must move buff to last 2022-08-30 11:07:31 +08:00
YuQing 8b22655352 add function sf_release_task_shared_mbuffer 2022-08-28 17:25:50 +08:00
YuQing c27cb2a9af add files: sf_shared_mbuffer.[hc] 2022-08-27 21:39:31 +08:00
YuQing 2ebb51dcfd support alloc_recv_buffer callback 2022-08-25 18:22:16 +08:00
YuQing 3257a5f842 function sf_get_base_path_from_conf_file impl. 2022-08-20 09:59:36 +08:00
vazmin f63843765a gh actions: upgrade to 1.1.17-1 2022-08-15 13:31:54 +00:00
YuQing f08b81b3b9 upgrade version to 1.1.17 2022-08-15 15:19:28 +08:00
YuQing dc9267188d replication quorum support smart mode 2022-08-02 16:22:54 +08:00
YuQing 2d01d91b87 sf_connection_manager.c log module name 2022-07-29 16:40:41 +08:00
vazmin f433589d05 gh actions: upgrade to 1.1.16-1 2022-07-25 13:52:09 +00:00
YuQing 809a1bf997 upgrade version to V1.1.16 2022-07-24 14:58:09 +08:00
YuQing 94ee91d37d bugfixed: sf_file_writer_get_last_lines deal correctly when cross files 2022-07-21 18:34:11 +08:00
YuQing 5da65a172c request_metadata.c: check data_version > 0 for performance 2022-07-08 11:23:32 +08:00
YuQing 803d3cb626 IdempotencyRequestMetadata add field n for integer argument 2022-07-05 19:25:55 +08:00
YuQing a966d1bf4d sf_synchronize_finished_notify_no_lock impl. 2022-07-05 09:01:37 +08:00
YuQing 7cfb8dc89d add functions sf_synchronize_finished_notify/wait 2022-07-04 11:01:24 +08:00
YuQing d95e3ed679 remove debug info 2022-06-30 17:59:44 +08:00
YuQing f63ede788e generate seq_id only once per RPC 2022-06-29 15:02:38 +08:00
YuQing 990ef2d173 request_metadata.[hc] v2 impl. 2022-06-27 22:17:17 +08:00
YuQing 92613c765f request_metadata.[hc] first verson finished 2022-06-27 17:17:57 +08:00
YuQing b364a875c2 add files idempotency/server/request_metadata.[hc] 2022-06-27 11:30:01 +08:00
YuQing c6300318c8 use macro FC_SET_CLOEXEC from libfastcommon 2022-06-25 11:24:59 +08:00
YuQing dfc58be3ec add func sf_nio_add_to_deleted_list 2022-06-25 09:21:02 +08:00
YuQing 8824c35975 open file with flag O_CLOEXEC 2022-06-24 18:56:28 +08:00
YuQing 56ccde45ba idempotency seq_id includes server id and channel id for global unique 2022-06-24 10:37:10 +08:00
YuQing 7f7ba8d835 support set next version when order_mode is VARY 2022-06-24 07:46:04 +08:00
YuQing 22ffe6841d change default values of log_file_rotate_everyday and log_file_keep_days 2022-06-18 18:19:35 +08:00
YuQing 6dd3bfbb22 sf_replication_quorum_check changed 2022-06-17 11:24:31 +08:00
YuQing e8e6cfc64a add replication quorum type and functions 2022-06-16 16:01:01 +08:00
vazmin e344feb092 gh actions: upgrade to 1.1.15-1 2022-06-15 14:26:27 +00:00
YuQing 98c85ba7eb libserverframe.spec: upgrade version 2022-06-06 20:34:57 +08:00
YuQing 1d1d4c9f00 sf_file_writer_get_binlog_indexes ignore file not exist 2022-06-03 15:32:07 +08:00
YuQing bcd1120617 sf_file_writer support specifying file prefix 2022-05-31 21:19:15 +08:00
YuQing 39e5dd419e custom define binlog rotate file size 2022-05-30 11:24:05 +08:00
YuQing 464573f9ff sf_file_writer_set_indexes impl. 2022-05-26 20:14:00 +08:00
YuQing f490366c03 sf_binlog_writer_change_write_index impl. 2022-05-19 18:14:33 +08:00
YuQing 353dde7059 add macro SF_ERROR_EINPROGRESS 2022-05-18 10:39:23 +08:00
YuQing 89a39e85d3 add macro SF_CLUSTER_ERROR_BINLOG_MISSED 2022-05-17 14:58:15 +08:00
YuQing c717646593 sf_file_writer_get_indexes impl. 2022-05-14 16:27:09 +08:00
YuQing c611b9b30c sf_file_writer.[hc] support start_index 2022-05-14 14:21:05 +08:00
YuQing 077a68a974 add two macros for vote node 2022-05-09 16:20:21 +08:00
YuQing 09839f9bf4 log service_name field when connect or communicate error 2022-05-08 10:44:55 +08:00
YuQing 00faf7e637 add function sf_load_cluster_config1 2022-05-06 15:12:19 +08:00
YuQing 07bbf65847 sf_proto_get_server_status_pack use struct 2022-05-06 09:48:49 +08:00
YuQing b38bf00a28 proto get_server_status remove field service_id 2022-04-30 10:20:30 +08:00
YuQing 7f92190c87 add SF_CLUSTER_PROTO_GET_SERVER_STATUS_REQ/RESP 2022-04-29 14:37:59 +08:00
vazmin 312b7752ef gh actions: upgrade to 1.1.14-1 2022-04-28 11:54:26 +00:00
YuQing 3734e68e0b sf_binlog_writer_finish check thread running 2022-04-24 10:57:19 +08:00
YuQing dcd024019b Merge branch 'recovery_and_balance' 2022-04-24 08:26:18 +08:00
YuQing a29ac30f67 upgrade version to V1.1.14 2022-04-22 14:57:26 +08:00
YuQing 78e321f4ad election quorum support sf_election_quorum_auto 2022-04-21 11:29:43 +08:00
YuQing 613c31fcf3 sf_binlog_writer_change_order_by check if versioned writer 2022-04-18 08:59:07 +08:00
YuQing 952647cbc9 order_by feature belongs to writer instead of thread 2022-04-17 18:18:18 +08:00
YuQing a57709de93 sf_connection_manager.c: make_master_connection refined 2022-04-15 16:58:42 +08:00
YuQing 7259eaf6ac log retry count when get connection fail 2022-04-11 10:24:21 +08:00
vazmin 3dd9313dc2 debian update substvars format 2022-04-05 00:37:34 +08:00
vazmin 9e77dac94b feat use debian/substvars 2022-04-04 14:35:52 +08:00
YuQing de943f684a add function sf_load_quorum_config 2022-03-30 21:22:34 +08:00
YuQing b4aaf69962 sf_buffered_writer.h: compile OK. 2022-03-25 15:30:14 +08:00
YuQing 68d41aa690 rename to sf_file_writer_deal_versioned_buffer 2022-03-22 08:23:03 +08:00
YuQing 1a03fec1f6 add function sf_file_writer_get_binlog_index 2022-03-19 16:36:11 +08:00
YuQing a727f382bc add function: sf_binlog_writer_notify_exit 2022-03-18 16:48:26 +08:00
YuQing a265bbbbea add function sf_binlog_writer_destroy 2022-03-17 20:52:41 +08:00
YuQing e061a3dfad add file src/sf_buffered_writer.h 2022-03-16 11:48:00 +08:00
vazmin 246ff83225 debian: add changelog 1.1.13-1 2022-03-13 17:19:01 +08:00
YuQing d129c6151e add function sf_binlog_writer_get_index_filename 2022-03-08 17:13:29 +08:00
YuQing 25ca590416 make.sh: change DEBUG_FLAG to 0 2022-03-06 19:41:16 +08:00
YuQing 2bcf2428e1 upgrade version to 1.1.13 2022-03-03 10:16:00 +08:00
YuQing 8de3678e86 sf_load_global_config_ex: server_name can be NULL 2022-03-03 10:00:29 +08:00
YuQing 6549172c67 support function sf_sharding_htable_delete 2022-02-27 15:57:32 +08:00
YuQing 3e4ddce4a2 add function sf_load_global_base_path 2022-02-26 10:05:25 +08:00
YuQing 1ba160b6d7 change log level to debug 2022-02-25 09:45:40 +08:00
YuQing 9159d9c24b simple_hash rename to fc_simple_hash 2022-02-09 22:39:40 +08:00
YuQing 2d177ab262 sf_iov.[hc] add function sf_iova_memcpy_ex 2022-02-14 10:26:25 +08:00
YuQing 52e34ca393 sf_iova_memset_ex: add const modifier 2022-02-04 15:51:08 +08:00
YuQing fa9e00f3b8 sf_iova_memset_ex for iov and iovcnt 2022-02-04 15:37:59 +08:00
YuQing 5796655ce0 sf_iova_memset impl. 2022-02-04 15:22:11 +08:00
YuQing 23ff87dea0 iova_slice error detect 2022-02-04 10:59:00 +08:00
YuQing a46945b6cd add files: sf_iov.[hc] 2022-02-03 22:30:19 +08:00
vazmin 1adfb10c63 upgrade version to 1.1.12 2022-01-15 20:16:06 +08:00
YuQing cfd7690f4e upgrade version to 1.1.12 2022-01-13 10:07:41 +08:00
YuQing 16f5b42b95 sf_synchronize_counter_wait: check SF_G_CONTINUE_FLAG 2022-01-12 07:05:27 +08:00
vazmin 1a06ea13e7 update debian package version 2021-12-27 21:50:18 +08:00
57 changed files with 6628 additions and 1733 deletions

.gitignore (vendored): 1 line changed

@ -31,3 +31,4 @@ src/Makefile
# other
*.swp
+*.swo

debian/changelog (vendored): 193 lines changed

@ -1,3 +1,196 @@
libserverframe (1.2.11-1) unstable; urgency=medium
* upgrade to 1.2.11-1
-- YuQing <384681@qq.com> Sun, 23 Nov 2025 10:48:22 +0000
libserverframe (1.2.11-1) unstable; urgency=medium
* upgrade to 1.2.11-1
-- YuQing <384681@qq.com> Sun, 23 Nov 2025 10:00:56 +0000
libserverframe (1.2.11-1) unstable; urgency=medium
* upgrade to 1.2.11-1
-- YuQing <384681@qq.com> Sun, 23 Nov 2025 09:06:43 +0000
libserverframe (1.2.8-1) unstable; urgency=medium
* upgrade to 1.2.8-1
-- YuQing <384681@qq.com> Sat, 16 Aug 2025 16:32:03 +0000
libserverframe (1.2.7-1) unstable; urgency=medium
* upgrade to 1.2.7-1
-- YuQing <384681@qq.com> Sun, 06 Apr 2025 16:56:40 +0000
libserverframe (1.2.5-1) unstable; urgency=medium
* upgrade to 1.2.5-1
-- YuQing <384681@qq.com> Sun, 29 Sep 2024 15:24:39 +0000
libserverframe (1.2.4-1) unstable; urgency=medium
* upgrade to 1.2.4-1
-- YuQing <384681@qq.com> Sat, 15 Jun 2024 14:45:44 +0000
libserverframe (1.2.3-1) unstable; urgency=medium
* upgrade to 1.2.3-1
-- YuQing <384681@qq.com> Sun, 17 Mar 2024 15:11:04 +0000
libserverframe (1.2.2-1) unstable; urgency=medium
* upgrade to 1.2.2-1
-- YuQing <384681@qq.com> Wed, 31 Jan 2024 12:00:10 +0000
libserverframe (1.2.1-1) unstable; urgency=medium
* upgrade to 1.2.1-1
-- YuQing <384681@qq.com> Mon, 01 Jan 2024 11:24:45 +0000
libserverframe (1.2.0-3) unstable; urgency=medium
* upgrade to 1.2.0-3
-- YuQing <384681@qq.com> Tue, 21 Nov 2023 14:36:16 +0000
libserverframe (1.2.0-2) unstable; urgency=medium
* upgrade to 1.2.0-2
-- YuQing <384681@qq.com> Mon, 20 Nov 2023 13:24:02 +0000
libserverframe (1.2.0-1) unstable; urgency=medium
* upgrade to 1.2.0-1
-- YuQing <384681@qq.com> Sun, 19 Nov 2023 14:46:16 +0000
libserverframe (1.1.29-1) unstable; urgency=medium
* upgrade to 1.1.29-1
-- YuQing <384681@qq.com> Sun, 06 Aug 2023 07:22:46 +0000
libserverframe (1.1.28-1) unstable; urgency=medium
* upgrade to 1.1.28-1
-- YuQing <384681@qq.com> Sun, 23 Jul 2023 14:28:20 +0000
libserverframe (1.1.27-1) unstable; urgency=medium
* upgrade to 1.1.27-1
-- YuQing <384681@qq.com> Sat, 24 Jun 2023 06:51:30 +0000
libserverframe (1.1.26-1) unstable; urgency=medium
* upgrade to 1.1.26-1
-- YuQing <384681@qq.com> Sun, 04 Jun 2023 10:52:11 +0000
libserverframe (1.1.25-1) unstable; urgency=medium
* upgrade to 1.1.25-1
-- YuQing <384681@qq.com> Sat, 18 Feb 2023 05:44:50 +0000
libserverframe (1.1.24-1) unstable; urgency=medium
* upgrade to 1.1.24-1
-- YuQing <384681@qq.com> Sun, 15 Jan 2023 13:50:15 +0000
libserverframe (1.1.22-1) unstable; urgency=medium
* upgrade to 1.1.22-1
-- YuQing <384681@qq.com> Mon, 21 Nov 2022 14:55:50 +0000
libserverframe (1.1.21-1) unstable; urgency=medium
* upgrade to 1.1.21-1
-- YuQing <384681@qq.com> Sat, 08 Oct 2022 13:28:40 +0000
libserverframe (1.1.20-1) unstable; urgency=medium
* upgrade to 1.1.20-1
-- YuQing <384681@qq.com> Thu, 22 Sep 2022 12:22:39 +0000
libserverframe (1.1.18-1) unstable; urgency=medium
* upgrade to 1.1.18-1
-- YuQing <384681@qq.com> Wed, 07 Sep 2022 13:36:38 +0000
libserverframe (1.1.17-1) unstable; urgency=medium
* upgrade to 1.1.17-1
-- YuQing <384681@qq.com> Mon, 15 Aug 2022 13:31:54 +0000
libserverframe (1.1.16-1) unstable; urgency=medium
* upgrade to 1.1.16-1
-- YuQing <384681@qq.com> Mon, 25 Jul 2022 13:52:09 +0000
libserverframe (1.1.15-1) unstable; urgency=medium
* upgrade to 1.1.15-1
-- YuQing <384681@qq.com> Wed, 15 Jun 2022 14:26:27 +0000
libserverframe (1.1.14-1) unstable; urgency=medium
* upgrade to 1.1.14-1
-- YuQing <384681@qq.com> Thu, 28 Apr 2022 11:54:26 +0000
libserverframe (1.1.13-1) unstable; urgency=medium
* add files: sf_iov.[hc]
* iova_slice error detect
* sf_iova_memset impl.
* sf_iova_memset_ex for iov and iovcnt
* sf_iova_memset_ex: add const modifier
* sf_iov.[hc] add function sf_iova_memcpy_ex
* simple_hash rename to fc_simple_hash
* change log level to debug
* add function sf_load_global_base_path
* support function sf_sharding_htable_delete
* sf_load_global_config_ex: server_name can be NULL
* upgrade version to 1.1.13
* make.sh: change DEBUG_FLAG to 0
* add function sf_binlog_writer_get_index_filename
-- YuQing <384681@qq.com> Sun, 13 Mar 2022 16:46:17 +0800
libserverframe (1.1.12-1) unstable; urgency=medium
* upgrade version to 1.1.12
-- YuQing <384681@qq.com> Sat, 15 Jan 2022 20:00:21 +0800
libserverframe (1.1.11-1) unstable; urgency=medium
* upgrade version to 1.1.11
-- YuQing <384681@qq.com> Sun, 26 Dec 2021 21:02:05 +0800
libserverframe (1.1.10-1) unstable; urgency=medium
* fixed somebugs

debian/control (vendored): 6 lines changed

@ -3,7 +3,7 @@ Section: net
Priority: optional
Maintainer: YuQing <384681@qq.com>
Build-Depends: debhelper (>=11~)
-, libfastcommon-dev (>= 1.0.52)
+, libfastcommon-dev (>= 1.0.56)
Standards-Version: 4.1.4
Homepage: https://github.com/happyfish100/libserverframe
@ -12,7 +12,7 @@ Section: net
Architecture: any
Multi-Arch: same
Pre-Depends: ${misc:Pre-Depends}
-Depends: ${misc:Depends}, ${shlibs:Depends}, libserverframe (= ${binary:Version})
+Depends: ${misc:Depends}, libserverframe (= ${binary:Version})
Description: libserverframe (development files)
This package contains header files.
@ -21,5 +21,5 @@ Section: net
Architecture: any
Multi-Arch: same
Pre-Depends: ${misc:Pre-Depends}
-Depends: ${misc:Depends}, ${shlibs:Depends}, libfastcommon (>= 1.0.52)
+Depends: ${misc:Depends}, ${shlibs:Depends}, libfastcommon (>= ${libfastcommon:Version})
Description: this network service framework library extract from FastDFS

debian/copyright (vendored): 73 lines changed

@ -2,45 +2,7 @@ Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: libserverframe
Source: https://github.com/happyfish100/libserverframe
-Files: src/idempotency/client/client_channel.c
+Files: *
-src/idempotency/client/client_channel.h
-src/idempotency/client/client_types.h
-src/idempotency/client/receipt_handler.c
-src/idempotency/client/receipt_handler.h
-src/idempotency/client/rpc_wrapper.h
-src/idempotency/server/channel_htable.c
-src/idempotency/server/channel_htable.h
-src/idempotency/server/request_htable.c
-src/idempotency/server/request_htable.h
-src/idempotency/server/server_channel.c
-src/idempotency/server/server_channel.h
-src/idempotency/server/server_handler.c
-src/idempotency/server/server_handler.h
-src/idempotency/server/server_types.h
-src/sf_binlog_writer.c
-src/sf_binlog_writer.h
-src/sf_cluster_cfg.c
-src/sf_cluster_cfg.h
-src/sf_configs.c
-src/sf_configs.h
-src/sf_connection_manager.c
-src/sf_connection_manager.h
-src/sf_define.h
-src/sf_func.c
-src/sf_func.h
-src/sf_global.c
-src/sf_global.h
-src/sf_nio.c
-src/sf_nio.h
-src/sf_proto.c
-src/sf_proto.h
-src/sf_service.c
-src/sf_service.h
-src/sf_sharding_htable.c
-src/sf_sharding_htable.h
-src/sf_types.h
-src/sf_util.c
-src/sf_util.h
Copyright: 2020 YuQing <384681@qq.com>
License: AGPL-3.0+
This program is free software: you can use, redistribute, and/or modify
@ -54,39 +16,6 @@ License: AGPL-3.0+
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
-Files: .gitignore
-README.md
-libserverframe.spec
-make.sh
-sample.conf
-src/Makefile
-src/Makefile.in
-src/idempotency/client/client_channel.lo
-src/idempotency/client/receipt_handler.lo
-src/idempotency/server/channel_htable.lo
-src/idempotency/server/request_htable.lo
-src/idempotency/server/server_channel.lo
-src/idempotency/server/server_handler.lo
-src/libserverframe.so
-src/sf_binlog_writer.lo
-src/sf_cluster_cfg.lo
-src/sf_configs.lo
-src/sf_connection_manager.lo
-src/sf_func.lo
-src/sf_global.lo
-src/sf_nio.lo
-src/sf_proto.lo
-src/sf_service.lo
-src/sf_sharding_htable.lo
-src/sf_util.lo
-Copyright: __NO_COPYRIGHT_NOR_LICENSE__
-License: __NO_COPYRIGHT_NOR_LICENSE__
-#----------------------------------------------------------------------------
-# Files marked as NO_LICENSE_TEXT_FOUND may be covered by the following
-# license/copyright files.
-#----------------------------------------------------------------------------
# License file: LICENSE
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007

debian/*.install:

@ -1 +1 @@
-usr/lib64/libserverframe.so* usr/lib/
+usr/lib/libserverframe.so*

debian/rules (vendored; file mode changed from normal to executable): 4 lines changed

@ -11,3 +11,7 @@ override_dh_auto_build:
override_dh_auto_install:
./make.sh install
dh_auto_install
+.PHONY: override_dh_gencontrol
+override_dh_gencontrol:
+dh_gencontrol -- -Tdebian/substvars

debian/substvars (vendored, new file): 1 line added

@ -0,0 +1 @@
+libfastcommon:Version=1.0.83

libserverframe.spec:

@ -2,7 +2,7 @@
%define CommitVersion %(echo $COMMIT_VERSION)
Name: libserverframe
-Version: 1.1.11
+Version: 1.2.11
Release: 1%{?dist}
Summary: network framework library
License: AGPL v3.0
@ -12,9 +12,9 @@ Source: http://github.com/happyfish100/libserverframe/%{name}-%{version}.tar.gz
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
-BuildRequires: libfastcommon-devel >= 1.0.54
+BuildRequires: libfastcommon-devel >= 1.0.83
Requires: %__cp %__mv %__chmod %__grep %__mkdir %__install %__id
-Requires: libfastcommon >= 1.0.54
+Requires: libfastcommon >= 1.0.83
%description
common framework library

make.sh: 42 lines changed

@ -5,18 +5,32 @@ TARGET_PREFIX=$DESTDIR/usr
TARGET_CONF_PATH=$DESTDIR/etc
LIB_VERSION=lib64
-DEBUG_FLAG=1
+DEBUG_FLAG=0
if [ -f /usr/include/fastcommon/_os_define.h ]; then
-OS_BITS=$(fgrep OS_BITS /usr/include/fastcommon/_os_define.h | awk '{print $NF;}')
+OS_BITS=$(grep -F OS_BITS /usr/include/fastcommon/_os_define.h | awk '{print $NF;}')
+USE_URING=$(grep -F IOEVENT_USE_URING /usr/include/fastcommon/_os_define.h | awk '{print $NF;}')
elif [ -f /usr/local/include/fastcommon/_os_define.h ]; then
-OS_BITS=$(fgrep OS_BITS /usr/local/include/fastcommon/_os_define.h | awk '{print $NF;}')
+OS_BITS=$(grep -F OS_BITS /usr/local/include/fastcommon/_os_define.h | awk '{print $NF;}')
+USE_URING=$(grep -F IOEVENT_USE_URING /usr/local/include/fastcommon/_os_define.h | awk '{print $NF;}')
else
OS_BITS=64
+USE_URING=''
fi
+uname=$(uname)
if [ "$OS_BITS" -eq 64 ]; then
+if [ $uname = 'Linux' ]; then
+osname=$(cat /etc/os-release | grep -w NAME | awk -F '=' '{print $2;}' | \
+awk -F '"' '{if (NF==3) {print $2} else {print $1}}' | awk '{print $1}')
+if [ $osname = 'Ubuntu' -o $osname = 'Debian' ]; then
+LIB_VERSION=lib
+else
LIB_VERSION=lib64
+fi
+else
+LIB_VERSION=lib
+fi
else
LIB_VERSION=lib
fi
@ -31,13 +45,16 @@ CFLAGS="$CFLAGS -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE"
if [ "$DEBUG_FLAG" = "1" ]; then
CFLAGS="$CFLAGS -g -O1 -DDEBUG_FLAG"
else
-CFLAGS="$CFLAGS -O3"
+CFLAGS="$CFLAGS -g -O3"
fi
LIBS=''
uname=$(uname)
if [ "$uname" = "Linux" ]; then
CFLAGS="$CFLAGS"
+if [ -n "$USE_URING" ]; then
+LIBS="$LIBS -luring"
+fi
elif [ "$uname" = "FreeBSD" ] || [ "$uname" = "Darwin" ]; then
CFLAGS="$CFLAGS"
if [ "$uname" = "Darwin" ]; then
@ -66,19 +83,19 @@ elif [ "$uname" = "HP-UX" ]; then
fi
elif [ "$uname" = "FreeBSD" ]; then
if [ -f /usr/lib/libc_r.so ]; then
-line=$(nm -D /usr/lib/libc_r.so | grep pthread_create | grep -w T)
+line=$(nm -D /usr/lib/libc_r.so | grep -F pthread_create | grep -w T)
if [ $? -eq 0 ]; then
LIBS="$LIBS -lc_r"
have_pthread=1
fi
elif [ -f /lib64/libc_r.so ]; then
-line=$(nm -D /lib64/libc_r.so | grep pthread_create | grep -w T)
+line=$(nm -D /lib64/libc_r.so | grep -F pthread_create | grep -w T)
if [ $? -eq 0 ]; then
LIBS="$LIBS -lc_r"
have_pthread=1
fi
elif [ -f /usr/lib64/libc_r.so ]; then
-line=$(nm -D /usr/lib64/libc_r.so | grep pthread_create | grep -w T)
+line=$(nm -D /usr/lib64/libc_r.so | grep -F pthread_create | grep -w T)
if [ $? -eq 0 ]; then
LIBS="$LIBS -lc_r"
have_pthread=1
@ -87,7 +104,7 @@ elif [ "$uname" = "FreeBSD" ]; then
fi
if [ $have_pthread -eq 0 ] && [ "$uname" = "Linux" ]; then
-/sbin/ldconfig -p | fgrep libpthread.so > /dev/null
+/sbin/ldconfig -p | grep -w libpthread.so > /dev/null
if [ $? -eq 0 ]; then
LIBS="$LIBS -lpthread"
else
@ -113,8 +130,15 @@ sed_replace()
fi
}
-cd src
+cd src/include
+link=$(readlink sf)
+if [ $? -ne 0 ] || [ "$link" != '..' -a "$link" != '../' ]; then
+ln -sf .. sf
+fi
+cd ..
cp Makefile.in Makefile
+sed_replace "s#\\\$(CC)#gcc#g" Makefile
sed_replace "s#\\\$(CFLAGS)#$CFLAGS#g" Makefile
sed_replace "s#\\\$(LIBS)#$LIBS#g" Makefile
sed_replace "s#\\\$(TARGET_PREFIX)#$TARGET_PREFIX#g" Makefile

src/Makefile.in:

@ -6,16 +6,20 @@ LIB_PATH = $(LIBS) -lfastcommon
TARGET_LIB = $(TARGET_PREFIX)/$(LIB_VERSION)
TOP_HEADERS = sf_types.h sf_global.h sf_define.h sf_nio.h sf_service.h \
-sf_func.h sf_util.h sf_configs.h sf_proto.h sf_binlog_writer.h \
+sf_func.h sf_util.h sf_configs.h sf_proto.h sf_cluster_cfg.h \
-sf_cluster_cfg.h sf_sharding_htable.h sf_connection_manager.h \
+sf_sharding_htable.h sf_connection_manager.h sf_serializer.h \
-sf_serializer.h sf_binlog_index.h sf_file_writer.h \
+sf_binlog_index.h sf_file_writer.h sf_binlog_writer.h \
-sf_ordered_writer.h
+sf_ordered_writer.h sf_buffered_writer.h sf_iov.h \
+sf_shared_mbuffer.h
+IDEMP_COMMON_HEADER = idempotency/common/idempotency_types.h
IDEMP_SERVER_HEADER = idempotency/server/server_types.h \
idempotency/server/server_channel.h \
idempotency/server/request_htable.h \
idempotency/server/channel_htable.h \
-idempotency/server/server_handler.h
+idempotency/server/server_handler.h \
+idempotency/server/request_metadata.h
IDEMP_CLIENT_HEADER = idempotency/client/client_types.h \
idempotency/client/receipt_handler.h \
@ -24,16 +28,18 @@ IDEMP_CLIENT_HEADER = idempotency/client/client_types.h \
ALL_HEADERS = $(TOP_HEADERS) $(IDEMP_SERVER_HEADER) $(IDEMP_CLIENT_HEADER)
-SHARED_OBJS = sf_nio.lo sf_service.lo sf_global.lo \
+SHARED_OBJS = sf_nio.lo sf_iov.lo sf_service.lo sf_global.lo \
sf_func.lo sf_util.lo sf_configs.lo sf_proto.lo \
-sf_binlog_writer.lo sf_sharding_htable.lo \
+sf_sharding_htable.lo sf_cluster_cfg.lo \
-sf_cluster_cfg.lo sf_connection_manager.lo \
+sf_connection_manager.lo sf_serializer.lo \
-sf_serializer.lo sf_binlog_index.lo \
+sf_binlog_index.lo sf_file_writer.lo \
-sf_file_writer.lo sf_ordered_writer.lo \
+sf_binlog_writer.lo sf_ordered_writer.lo \
+sf_shared_mbuffer.lo \
idempotency/server/server_channel.lo \
idempotency/server/request_htable.lo \
idempotency/server/channel_htable.lo \
idempotency/server/server_handler.lo \
+idempotency/server/request_metadata.lo \
idempotency/client/receipt_handler.lo \
idempotency/client/client_channel.lo
@ -55,16 +61,18 @@ libserverframe.so: $(SHARED_OBJS)
install:
mkdir -p $(TARGET_LIB)
mkdir -p $(TARGET_PREFIX)/lib
+mkdir -p $(TARGET_PREFIX)/include/sf/idempotency/common
mkdir -p $(TARGET_PREFIX)/include/sf/idempotency/server
mkdir -p $(TARGET_PREFIX)/include/sf/idempotency/client
install -m 755 $(ALL_LIBS) $(TARGET_LIB)
cp -f $(TOP_HEADERS) $(TARGET_PREFIX)/include/sf
+cp -f $(IDEMP_COMMON_HEADER) $(TARGET_PREFIX)/include/sf/idempotency/common
cp -f $(IDEMP_SERVER_HEADER) $(TARGET_PREFIX)/include/sf/idempotency/server
cp -f $(IDEMP_CLIENT_HEADER) $(TARGET_PREFIX)/include/sf/idempotency/client
@BUILDROOT=$$(echo "$(TARGET_PREFIX)" | grep BUILDROOT); \
-if [ -z "$$BUILDROOT" ] && [ ! -e $(TARGET_PREFIX)/lib/libserverframe.so ]; then ln -s $(TARGET_LIB)/libserverframe.so $(TARGET_PREFIX)/lib/libserverframe.so; fi
+if [ -z "$$BUILDROOT" ] && [ "$(TARGET_LIB)" != "$(TARGET_PREFIX)/lib" ]; then ln -sf $(TARGET_LIB)/libserverframe.so $(TARGET_PREFIX)/lib/libserverframe.so; fi
clean:
rm -f $(ALL_OBJS) $(ALL_LIBS) $(ALL_PRGS)
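
The make.sh and Makefile.in changes above switch the tree to the same "sf/..." include prefix that installed consumers use: make.sh now creates the src/include/sf symlink, and the install target copies headers under $(TARGET_PREFIX)/include/sf. A minimal consumer sketch under that assumption follows; it is illustrative only, and it assumes SF_G_CONNECT_TIMEOUT and SF_G_NETWORK_TIMEOUT are exposed by sf/sf_global.h, as the old lines in the receipt_handler.c hunks below use them:

/* demo.c, illustrative consumer of the installed headers and library.
 * Build roughly with: gcc demo.c -lserverframe -lfastcommon */
#include <stdio.h>
#include "sf/sf_global.h"   /* installed under <prefix>/include/sf */

int main(void)
{
    /* print the global timeout defaults; purely for illustration */
    printf("connect timeout: %d, network timeout: %d\n",
            SF_G_CONNECT_TIMEOUT, SF_G_NETWORK_TIMEOUT);
    return 0;
}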

src/idempotency/client/client_channel.c:

@ -31,11 +31,11 @@
#include "fastcommon/pthread_func.h"
#include "fastcommon/sched_thread.h"
#include "fastcommon/fc_queue.h"
-#include "../../sf_util.h"
+#include "sf/sf_util.h"
-#include "../../sf_func.h"
+#include "sf/sf_func.h"
-#include "../../sf_nio.h"
+#include "sf/sf_nio.h"
-#include "../../sf_global.h"
+#include "sf/sf_global.h"
-#include "../../sf_service.h"
+#include "sf/sf_service.h"
#include "client_channel.h"
typedef struct {
@ -135,7 +135,7 @@ static int idempotency_channel_alloc_init(void *element, void *args)
return result;
}
-if ((result=init_pthread_lock_cond_pair(&channel->lc_pair)) != 0) {
+if ((result=init_pthread_lock_cond_pair(&channel->lcp)) != 0) {
return result;
}
@ -171,17 +171,30 @@ void client_channel_destroy()
}
static struct fast_task_info *alloc_channel_task(IdempotencyClientChannel
-*channel, const uint32_t hash_code, const char *server_ip,
-const uint16_t port, int *err_no)
+*channel, const uint32_t hash_code, const FCCommunicationType comm_type,
+const char *server_ip, const uint16_t port, int *err_no)
{
struct fast_task_info *task;
+SFAddressFamilyHandler *fh;
+SFNetworkHandler *handler;
-if ((task=sf_alloc_init_task(&g_sf_context, -1)) == NULL) {
+if (is_ipv6_addr(server_ip)) {
+fh = g_sf_context.handlers + SF_IPV6_ADDRESS_FAMILY_INDEX;
+} else {
+fh = g_sf_context.handlers + SF_IPV4_ADDRESS_FAMILY_INDEX;
+}
+if (comm_type == fc_comm_type_sock) {
+handler = fh->handlers + SF_SOCKET_NETWORK_HANDLER_INDEX;
+} else {
+handler = fh->handlers + SF_RDMACM_NETWORK_HANDLER_INDEX;
+}
+if ((task=sf_alloc_init_task(handler, -1)) == NULL) {
*err_no = ENOMEM;
return NULL;
}
-snprintf(task->server_ip, sizeof(task->server_ip), "%s", server_ip);
+fc_safe_strcpy(task->server_ip, server_ip);
task->port = port;
task->arg = channel;
task->thread_data = g_sf_context.thread_data +
@ -190,7 +203,8 @@ static struct fast_task_info *alloc_channel_task(IdempotencyClientChannel
channel->last_connect_time = g_current_time;
if ((*err_no=sf_nio_notify(task, SF_NIO_STAGE_CONNECT)) != 0) {
channel->in_ioevent = 0; //rollback
-sf_release_task(task);
+__sync_sub_and_fetch(&task->reffer_count, 1);
+free_queue_push(task);
return NULL;
}
return task;
@ -200,6 +214,15 @@ int idempotency_client_channel_check_reconnect(
IdempotencyClientChannel *channel)
{
int result;
+char formatted_ip[FORMATTED_IP_SIZE];
+#if IOEVENT_USE_URING
+struct fast_task_info *task;
+task = channel->task;
+if (SF_CTX->use_io_uring && FC_ATOMIC_GET(task->reffer_count) > 1) {
+return 0;
+}
+#endif
if (!__sync_bool_compare_and_swap(&channel->in_ioevent, 0, 1)) {
return 0;
@ -210,11 +233,16 @@ int idempotency_client_channel_check_reconnect(
channel->last_connect_time = g_current_time;
}
+if (FC_LOG_BY_LEVEL(LOG_DEBUG)) {
+format_ip_address(channel->task->server_ip, formatted_ip);
logDebug("file: "__FILE__", line: %d, "
-"trigger connect to server %s:%u",
-__LINE__, channel->task->server_ip,
-channel->task->port);
+"trigger connect to server %s:%u", __LINE__,
+formatted_ip, channel->task->port);
+}
+if (channel->task->event.fd >= 0) {
+channel->task->handler->close_connection(channel->task);
+}
__sync_bool_compare_and_swap(&channel->task->canceled, 1, 0);
if ((result=sf_nio_notify(channel->task, SF_NIO_STAGE_CONNECT)) == 0) {
channel->last_connect_time = g_current_time;
@ -226,8 +254,8 @@ int idempotency_client_channel_check_reconnect(
}
struct idempotency_client_channel *idempotency_client_channel_get(
-const char *server_ip, const uint16_t server_port,
-const int timeout, int *err_no)
+const FCCommunicationType comm_type, const char *server_ip,
+const uint16_t server_port, const int timeout, int *err_no)
{
int r;
int key_len;
@ -239,8 +267,11 @@ struct idempotency_client_channel *idempotency_client_channel_get(
IdempotencyClientChannel *current;
IdempotencyClientChannel *channel;
-key_len = snprintf(key, sizeof(key), "%s_%u", server_ip, server_port);
-hash_code = simple_hash(key, key_len);
+key_len = strlen(server_ip);
+memcpy(key, server_ip, key_len);
+*(key + key_len++) = '-';
+key_len += fc_itoa(server_port, key + key_len);
+hash_code = fc_simple_hash(key, key_len);
bucket = channel_context.htable.buckets +
hash_code % channel_context.htable.capacity;
previous = NULL;
@ -277,8 +308,8 @@ struct idempotency_client_channel *idempotency_client_channel_get(
break;
}
-channel->task = alloc_channel_task(channel,
-hash_code, server_ip, server_port, err_no);
+channel->task = alloc_channel_task(channel, hash_code,
+comm_type, server_ip, server_port, err_no);
if (channel->task == NULL) {
fast_mblock_free_object(&channel_context.
channel_allocator, channel);
@ -323,8 +354,8 @@ int idempotency_client_channel_push(struct idempotency_client_channel *channel,
receipt->req_id = req_id;
fc_queue_push_ex(&channel->queue, receipt, &notify);
if (notify) {
-if (__sync_add_and_fetch(&channel->in_ioevent, 0)) {
+if (FC_ATOMIC_GET(channel->in_ioevent)) {
-if (__sync_add_and_fetch(&channel->established, 0)) {
+if (FC_ATOMIC_GET(channel->established)) {
sf_nio_notify(channel->task, SF_NIO_STAGE_CONTINUE);
}
} else {

src/idempotency/client/client_channel.h:

@ -22,6 +22,7 @@
#include "fastcommon/pthread_func.h"
#include "fastcommon/sched_thread.h"
#include "fastcommon/fc_atomic.h"
+#include "sf/sf_types.h"
#include "client_types.h"
#ifdef __cplusplus
@ -40,13 +41,14 @@ void idempotency_client_channel_config_to_string_ex(
char *output, const int size, const bool add_comma);
struct idempotency_client_channel *idempotency_client_channel_get(
-const char *server_ip, const uint16_t server_port,
-const int timeout, int *err_no);
+const FCCommunicationType comm_type, const char *server_ip,
+const uint16_t server_port, const int timeout, int *err_no);
static inline uint64_t idempotency_client_channel_next_seq_id(
struct idempotency_client_channel *channel)
{
-return __sync_add_and_fetch(&channel->next_req_id, 1);
+return SF_IDEMPOTENCY_NEXT_REQ_ID(channel->server_id,
+channel->id, FC_ATOMIC_INC(channel->next_seq));
}
int idempotency_client_channel_push(struct idempotency_client_channel *channel,
@ -74,13 +76,28 @@ static inline void idempotency_client_channel_set_id_key(
static inline int idempotency_client_channel_check_wait_ex(
struct idempotency_client_channel *channel, const int timeout)
{
-if (__sync_add_and_fetch(&channel->established, 0)) {
+if (FC_ATOMIC_GET(channel->established)) {
return 0;
}
idempotency_client_channel_check_reconnect(channel);
-lcp_timedwait_sec(&channel->lc_pair, timeout);
+lcp_timedwait_sec(&channel->lcp, timeout);
-return __sync_add_and_fetch(&channel->established, 0) ? 0 : ETIMEDOUT;
+if (FC_ATOMIC_GET(channel->established)) {
+return 0;
+} else {
+/*
+char formatted_ip[FORMATTED_IP_SIZE];
+format_ip_address(channel->task->server_ip, formatted_ip);
+logInfo("file: "__FILE__", line: %d, "
+"channel_check fail, server %s:%u, in_ioevent: %d, "
+"canceled: %d, req count: %"PRId64, __LINE__,
+formatted_ip, channel->task->port,
+__sync_add_and_fetch(&channel->in_ioevent, 0),
+__sync_add_and_fetch(&channel->task->canceled, 0),
+channel->task->req_count);
+*/
+return ETIMEDOUT;
+}
}
#ifdef __cplusplus
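
A hedged usage sketch of the reworked client API above (illustrative values only; the include path follows the install layout from the Makefile changes, fc_comm_type_sock is the plain-socket transport type from libfastcommon, and error handling is trimmed):

/* illustrative caller; IP, port and timeout values are made up */
#include <stdint.h>
#include "sf/idempotency/client/client_channel.h"

static int example_get_channel(uint64_t *req_id)
{
    int err_no;
    struct idempotency_client_channel *channel;

    /* comm_type now comes first in the new signature */
    channel = idempotency_client_channel_get(fc_comm_type_sock,
            "192.168.1.10", 23000, 10, &err_no);
    if (channel == NULL) {
        return err_no;
    }
    /* wait up to 10 seconds for the channel to be established */
    if ((err_no=idempotency_client_channel_check_wait_ex(channel, 10)) != 0) {
        return err_no;   /* ETIMEDOUT when it never came up */
    }
    *req_id = idempotency_client_channel_next_seq_id(channel);
    return 0;
}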

src/idempotency/client/client_types.h:

@ -21,6 +21,7 @@
#include "fastcommon/fast_mblock.h"
#include "fastcommon/fc_list.h"
#include "fastcommon/fc_queue.h"
+#include "sf/idempotency/common/idempotency_types.h"
typedef struct idempotency_client_config {
bool enabled;
@ -40,11 +41,12 @@ typedef struct idempotency_client_channel {
volatile char in_ioevent;
volatile char established;
int buffer_size; //the min task size of the server and mine
+uint32_t server_id;
+volatile uint32_t next_seq;
time_t last_connect_time; //for connect frequency control
time_t last_pkg_time; //last communication time
time_t last_report_time; //last report time for rpc receipt
-pthread_lock_cond_pair_t lc_pair; //for channel valid check and notify
+pthread_lock_cond_pair_t lcp; //for channel valid check and notify
-volatile uint64_t next_req_id;
struct fast_mblock_man receipt_allocator;
struct fast_task_info *task;
@ -61,6 +63,14 @@ typedef struct idempotency_receipt_thread_context {
} last_check_times;
} IdempotencyReceiptThreadContext;
+typedef struct idempotency_receipt_global_vars {
+struct {
+int task_padding_size;
+sf_init_connection_callback init_connection;
+} rdma;
+IdempotencyReceiptThreadContext *thread_contexts;
+} IdempotencyReceiptGlobalVars;
#ifdef __cplusplus
extern "C" {
#endif
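
For orientation only: the new server_id and next_seq fields feed the SF_IDEMPOTENCY_NEXT_REQ_ID macro used in client_channel.h above, matching the commit "idempotency seq_id includes server id and channel id for global unique". The real macro is defined elsewhere in libserverframe and is not shown in this diff; the function below is a hypothetical illustration of the idea, with field widths chosen arbitrarily:

/* NOT the real SF_IDEMPOTENCY_NEXT_REQ_ID: an invented sketch that only
 * shows how a server id, a channel id and a per-channel sequence could be
 * packed into one globally unique 64-bit request id. */
#include <stdint.h>

static inline uint64_t example_next_req_id(uint32_t server_id,
        uint32_t channel_id, uint32_t seq)
{
    return ((uint64_t)(server_id & 0xFFFF) << 48) |   /* assumed 16-bit server id  */
           ((uint64_t)(channel_id & 0xFFFF) << 32) |  /* assumed 16-bit channel id */
           (uint64_t)seq;                             /* 32-bit sequence number    */
}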

src/idempotency/client/receipt_handler.c:

@@ -41,23 +41,35 @@
#include "client_channel.h"
#include "receipt_handler.h"

-static IdempotencyReceiptThreadContext *receipt_thread_contexts = NULL;
+static IdempotencyReceiptGlobalVars receipt_global_vars;
+
+#define RECEIPT_THREAD_CONTEXTS  receipt_global_vars.thread_contexts
+#define TASK_PADDING_SIZE        receipt_global_vars.rdma.task_padding_size
+#define RDMA_INIT_CONNECTION     receipt_global_vars.rdma.init_connection

-static int receipt_init_task(struct fast_task_info *task)
+static int receipt_init_task(struct fast_task_info *task, void *arg)
{
-    task->connect_timeout = SF_G_CONNECT_TIMEOUT;  //for client side
-    task->network_timeout = SF_G_NETWORK_TIMEOUT;
+#if IOEVENT_USE_URING
+    FC_URING_IS_CLIENT(task) = true;
+#endif
+
+    if (RDMA_INIT_CONNECTION != NULL) {
+        return RDMA_INIT_CONNECTION(task, arg);
+    } else {
        return 0;
+    }
}

static int receipt_recv_timeout_callback(struct fast_task_info *task)
{
    IdempotencyClientChannel *channel;
+    char formatted_ip[FORMATTED_IP_SIZE];

+    format_ip_address(task->server_ip, formatted_ip);
    if (SF_NIO_TASK_STAGE_FETCH(task) == SF_NIO_STAGE_CONNECT) {
        logError("file: "__FILE__", line: %d, "
                "connect to server %s:%u timeout",
-                __LINE__, task->server_ip, task->port);
+                __LINE__, formatted_ip, task->port);
        return ETIMEDOUT;
    }
@@ -65,11 +77,13 @@ static int receipt_recv_timeout_callback(struct fast_task_info *task)
    if (channel->waiting_resp_qinfo.head != NULL) {
        logError("file: "__FILE__", line: %d, "
                "waiting receipt response from server %s:%u timeout",
-                __LINE__, task->server_ip, task->port);
+                __LINE__, formatted_ip, task->port);
    } else {
        logError("file: "__FILE__", line: %d, "
-                "communication with server %s:%u timeout",
-                __LINE__, task->server_ip, task->port);
+                "%s server %s:%u timeout, channel established: %d",
+                __LINE__, task->nio_stages.current == SF_NIO_STAGE_SEND ?
+                "send to" : "recv from", formatted_ip, task->port,
+                FC_ATOMIC_GET(channel->established));
    }

    return ETIMEDOUT;
@@ -78,22 +92,27 @@ static int receipt_recv_timeout_callback(struct fast_task_info *task)
static void receipt_task_finish_cleanup(struct fast_task_info *task)
{
    IdempotencyClientChannel *channel;
+    char formatted_ip[FORMATTED_IP_SIZE];

    if (task->event.fd >= 0) {
        sf_task_detach_thread(task);
-        close(task->event.fd);
-        task->event.fd = -1;
    }

-    channel = (IdempotencyClientChannel *)task->arg;
+    sf_nio_reset_task_length(task);
+    task->req_count = 0;
+    task->pending_send_count = 0;
+    channel = (IdempotencyClientChannel *)task->arg;
    fc_list_del_init(&channel->dlink);
    __sync_bool_compare_and_swap(&channel->established, 1, 0);
    __sync_bool_compare_and_swap(&channel->in_ioevent, 1, 0);
+    if (FC_LOG_BY_LEVEL(LOG_DEBUG)) {
+        format_ip_address(task->server_ip, formatted_ip);
        logDebug("file: "__FILE__", line: %d, "
                "receipt task for server %s:%u exit",
-                __LINE__, task->server_ip, task->port);
+                __LINE__, formatted_ip, task->port);
+    }
}

static void setup_channel_request(struct fast_task_info *task)
@@ -103,14 +122,15 @@ static void setup_channel_request(struct fast_task_info *task)
    SFProtoSetupChannelReq *req;

    channel = (IdempotencyClientChannel *)task->arg;
-    header = (SFCommonProtoHeader *)task->data;
+    header = (SFCommonProtoHeader *)task->send.ptr->data;
    req = (SFProtoSetupChannelReq *)(header + 1);
    int2buff(__sync_add_and_fetch(&channel->id, 0), req->channel_id);
    int2buff(__sync_add_and_fetch(&channel->key, 0), req->key);
    SF_PROTO_SET_HEADER(header, SF_SERVICE_PROTO_SETUP_CHANNEL_REQ,
            sizeof(SFProtoSetupChannelReq));
-    task->length = sizeof(SFCommonProtoHeader) + sizeof(SFProtoSetupChannelReq);
+    task->send.ptr->length = sizeof(SFCommonProtoHeader) +
+            sizeof(SFProtoSetupChannelReq);
    sf_send_add_event(task);
}
@@ -137,10 +157,10 @@ static int check_report_req_receipt(struct fast_task_info *task)
        return 0;
    }

-    header = (SFCommonProtoHeader *)task->data;
+    header = (SFCommonProtoHeader *)task->send.ptr->data;
    rheader = (SFProtoReportReqReceiptHeader *)(header + 1);
    rbody = rstart = (SFProtoReportReqReceiptBody *)(rheader + 1);
-    buff_end = task->data + channel->buffer_size;
+    buff_end = task->send.ptr->data + channel->buffer_size;
    last = NULL;
    receipt = channel->waiting_resp_qinfo.head;
    do {
@@ -170,8 +190,9 @@ static int check_report_req_receipt(struct fast_task_info *task)

    count = rbody - rstart;
    int2buff(count, rheader->count);
-    task->length = (char *)rbody - task->data;
-    int2buff(task->length - sizeof(SFCommonProtoHeader), header->body_len);
+    task->send.ptr->length = (char *)rbody - task->send.ptr->data;
+    int2buff(task->send.ptr->length - sizeof(SFCommonProtoHeader),
+            header->body_len);
    header->cmd = SF_SERVICE_PROTO_REPORT_REQ_RECEIPT_REQ;
    sf_send_add_event(task);
    return count;
@@ -185,18 +206,18 @@ static void close_channel_request(struct fast_task_info *task)
    channel = (IdempotencyClientChannel *)task->arg;
    idempotency_client_channel_set_id_key(channel, 0, 0);

-    header = (SFCommonProtoHeader *)task->data;
+    header = (SFCommonProtoHeader *)task->send.ptr->data;
    SF_PROTO_SET_HEADER(header, SF_SERVICE_PROTO_CLOSE_CHANNEL_REQ, 0);
-    task->length = sizeof(SFCommonProtoHeader);
+    task->send.ptr->length = sizeof(SFCommonProtoHeader);
    sf_send_add_event(task);
}

static void active_test_request(struct fast_task_info *task)
{
    SFCommonProtoHeader *header;

-    header = (SFCommonProtoHeader *)task->data;
+    header = (SFCommonProtoHeader *)task->send.ptr->data;
    SF_PROTO_SET_HEADER(header, SF_PROTO_ACTIVE_TEST_REQ, 0);
-    task->length = sizeof(SFCommonProtoHeader);
+    task->send.ptr->length = sizeof(SFCommonProtoHeader);
    sf_send_add_event(task);
}

@@ -224,17 +245,22 @@ static void report_req_receipt_request(struct fast_task_info *task,
        if (update_lru) {
            update_lru_chain(task);
        }
+        task->pending_send_count++;
    }
}

static inline int receipt_expect_body_length(struct fast_task_info *task,
        const int expect_body_len)
{
-    if ((int)(task->length - sizeof(SFCommonProtoHeader)) != expect_body_len) {
+    int body_len;
+    char formatted_ip[FORMATTED_IP_SIZE];
+
+    body_len = task->recv.ptr->length - sizeof(SFCommonProtoHeader);
+    if (body_len != expect_body_len) {
+        format_ip_address(task->server_ip, formatted_ip);
        logError("file: "__FILE__", line: %d, "
-                "server %s:%u, response body length: %d != %d",
-                __LINE__, task->server_ip, task->port, (int)(task->length -
-                sizeof(SFCommonProtoHeader)), expect_body_len);
+                "server %s:%u, response body length: %d != %d", __LINE__,
+                formatted_ip, task->port, body_len, expect_body_len);
        return EINVAL;
    }
@@ -247,6 +273,7 @@ static int deal_setup_channel_response(struct fast_task_info *task)
    IdempotencyReceiptThreadContext *thread_ctx;
    SFProtoSetupChannelResp *resp;
    IdempotencyClientChannel *channel;
+    char formatted_ip[FORMATTED_IP_SIZE];
    int channel_id;
    int channel_key;
    int buffer_size;
@@ -258,28 +285,30 @@ static int deal_setup_channel_response(struct fast_task_info *task)
    }

    channel = (IdempotencyClientChannel *)task->arg;
-    if (__sync_add_and_fetch(&channel->established, 0)) {
+    if (FC_ATOMIC_GET(channel->established)) {
+        format_ip_address(task->server_ip, formatted_ip);
        logWarning("file: "__FILE__", line: %d, "
                "response from server %s:%u, unexpected cmd: "
                "SETUP_CHANNEL_RESP, ignore it!",
-                __LINE__, task->server_ip, task->port);
+                __LINE__, formatted_ip, task->port);
        return 0;
    }

-    resp = (SFProtoSetupChannelResp *)(task->data + sizeof(SFCommonProtoHeader));
+    resp = (SFProtoSetupChannelResp *)SF_PROTO_RECV_BODY(task);
    channel_id = buff2int(resp->channel_id);
    channel_key = buff2int(resp->key);
    buffer_size = buff2int(resp->buffer_size);
+    channel->server_id = buff2int(resp->server_id);
    idempotency_client_channel_set_id_key(channel, channel_id, channel_key);
    if (__sync_bool_compare_and_swap(&channel->established, 0, 1)) {
        thread_ctx = (IdempotencyReceiptThreadContext *)task->thread_data->arg;
        fc_list_add_tail(&channel->dlink, &thread_ctx->head);
    }

-    channel->buffer_size = FC_MIN(buffer_size, task->size);
-    PTHREAD_MUTEX_LOCK(&channel->lc_pair.lock);
-    pthread_cond_broadcast(&channel->lc_pair.cond);
-    PTHREAD_MUTEX_UNLOCK(&channel->lc_pair.lock);
+    channel->buffer_size = FC_MIN(buffer_size, task->send.ptr->size);
+    PTHREAD_MUTEX_LOCK(&channel->lcp.lock);
+    pthread_cond_broadcast(&channel->lcp.cond);
+    PTHREAD_MUTEX_UNLOCK(&channel->lcp.lock);

    if (channel->waiting_resp_qinfo.head != NULL) {
        bool notify;
@@ -298,6 +327,7 @@ static inline int deal_report_req_receipt_response(struct fast_task_info *task)
    IdempotencyClientChannel *channel;
    IdempotencyClientReceipt *current;
    IdempotencyClientReceipt *deleted;
+    char formatted_ip[FORMATTED_IP_SIZE];

    if ((result=receipt_expect_body_length(task, 0)) != 0) {
        return result;
@@ -305,13 +335,15 @@ static inline int deal_report_req_receipt_response(struct fast_task_info *task)

    channel = (IdempotencyClientChannel *)task->arg;
    if (channel->waiting_resp_qinfo.head == NULL) {
+        format_ip_address(task->server_ip, formatted_ip);
        logWarning("file: "__FILE__", line: %d, "
                "response from server %s:%u, unexpect cmd: "
                "REPORT_REQ_RECEIPT_RESP", __LINE__,
-                task->server_ip, task->port);
-        return 0;
+                formatted_ip, task->port);
+        return EINVAL;
    }

+    task->pending_send_count--;
    current = channel->waiting_resp_qinfo.head;
    do {
        deleted = current;
@@ -328,40 +360,46 @@ static inline int deal_report_req_receipt_response(struct fast_task_info *task)
static int receipt_deal_task(struct fast_task_info *task, const int stage)
{
    int result;
+    SFCommonProtoHeader *header;
+    char formatted_ip[FORMATTED_IP_SIZE];

    do {
        if (stage == SF_NIO_STAGE_HANDSHAKE) {
            setup_channel_request(task);
            result = 0;
            break;
-        } else if (stage == SF_NIO_STAGE_CONTINUE && task->length == 0) {
+        } else if (stage == SF_NIO_STAGE_CONTINUE) {
+            if (task->pending_send_count == 0) {
                if (((IdempotencyClientChannel *)task->arg)->established) {
                    report_req_receipt_request(task, true);
-                } else {
+                } else if (task->req_count > 0) {
                    sf_set_read_event(task);  //trigger read event
                }
+            }
            result = 0;
            break;
        }

-        result = buff2short(((SFCommonProtoHeader *)task->data)->status);
+        header = (SFCommonProtoHeader *)task->recv.ptr->data;
+        result = buff2short(header->status);
        if (result != 0) {
            int msg_len;
            char *message;

-            msg_len = task->length - sizeof(SFCommonProtoHeader);
-            message = task->data + sizeof(SFCommonProtoHeader);
+            msg_len = SF_RECV_BODY_LENGTH(task);
+            message = SF_PROTO_RECV_BODY(task);
+            format_ip_address(task->server_ip, formatted_ip);
            logError("file: "__FILE__", line: %d, "
                    "response from server %s:%u, cmd: %d (%s), "
-                    "status: %d, error info: %.*s",
-                    __LINE__, task->server_ip, task->port,
-                    ((SFCommonProtoHeader *)task->data)->cmd,
-                    sf_get_cmd_caption(((SFCommonProtoHeader *)task->data)->cmd),
+                    "status: %d, error info: %.*s", __LINE__,
+                    formatted_ip, task->port, header->cmd,
+                    sf_get_cmd_caption(header->cmd),
                    result, msg_len, message);
            break;
        }

-        switch (((SFCommonProtoHeader *)task->data)->cmd) {
+        switch (header->cmd) {
            case SF_SERVICE_PROTO_SETUP_CHANNEL_RESP:
                result = deal_setup_channel_response(task);
                break;
@@ -369,29 +407,36 @@ static int receipt_deal_task(struct fast_task_info *task, const int stage)
                result = deal_report_req_receipt_response(task);
                break;
            case SF_PROTO_ACTIVE_TEST_RESP:
+                task->pending_send_count--;
                result = 0;
                break;
            case SF_SERVICE_PROTO_CLOSE_CHANNEL_RESP:
                result = ECONNRESET;  //force to close socket
+                if (FC_LOG_BY_LEVEL(LOG_DEBUG)) {
+                    format_ip_address(task->server_ip, formatted_ip);
                    logDebug("file: "__FILE__", line: %d, "
                            "close channel to server %s:%u !!!",
-                            __LINE__, task->server_ip, task->port);
+                            __LINE__, formatted_ip, task->port);
+                }
                break;
            default:
+                format_ip_address(task->server_ip, formatted_ip);
                logError("file: "__FILE__", line: %d, "
                        "response from server %s:%u, unexpect cmd: %d (%s)",
-                        __LINE__, task->server_ip, task->port,
-                        ((SFCommonProtoHeader *)task->data)->cmd,
-                        sf_get_cmd_caption(((SFCommonProtoHeader *)task->data)->cmd));
+                        __LINE__, formatted_ip, task->port, header->cmd,
+                        sf_get_cmd_caption(header->cmd));
                result = EINVAL;
                break;
        }

        if (result == 0) {
            update_lru_chain(task);
-            task->offset = task->length = 0;
+            task->recv.ptr->length = 0;
+            task->recv.ptr->offset = 0;
+            if (task->pending_send_count == 0) {
                report_req_receipt_request(task, false);
            }
+        }
    } while (0);

    return result > 0 ? -1 * result : result;
@@ -410,9 +455,10 @@ static void receipt_thread_check_heartbeat(
            break;
        }

-        if (sf_nio_task_is_idle(channel->task)) {
+        if (channel->task->pending_send_count == 0) {
            channel->last_pkg_time = g_current_time;
            active_test_request(channel->task);
+            channel->task->pending_send_count++;
        }
    }
}
@@ -422,18 +468,22 @@ static void receipt_thread_close_idle_channel(
{
    IdempotencyClientChannel *channel;
    IdempotencyClientChannel *tmp;
+    char formatted_ip[FORMATTED_IP_SIZE];

    fc_list_for_each_entry_safe(channel, tmp, &thread_ctx->head, dlink) {
-        if (!sf_nio_task_is_idle(channel->task)) {
+        if (channel->task->pending_send_count > 0) {
            continue;
        }

        if (g_current_time - channel->last_report_time >
                g_idempotency_client_cfg.channel_max_idle_time)
        {
+            if (FC_LOG_BY_LEVEL(LOG_DEBUG)) {
+                format_ip_address(channel->task->server_ip, formatted_ip);
                logDebug("file: "__FILE__", line: %d, "
                        "close channel to server %s:%u because idle too long",
-                        __LINE__, channel->task->server_ip, channel->task->port);
+                        __LINE__, formatted_ip, channel->task->port);
+            }
            close_channel_request(channel->task);
        }
    }
@@ -464,40 +514,61 @@ static void *receipt_alloc_thread_extra_data(const int thread_index)
{
    IdempotencyReceiptThreadContext *ctx;

-    ctx = receipt_thread_contexts + thread_index;
+    ctx = RECEIPT_THREAD_CONTEXTS + thread_index;
    FC_INIT_LIST_HEAD(&ctx->head);
    return ctx;
}

-static int do_init()
+static int do_init(FCAddressPtrArray *address_array)
{
+    const int task_arg_size = 0;
+    const bool double_buffers = false;
+    const bool need_shrink_task_buffer = false;
+    const bool explicit_post_recv = false;
+    int result;
    int bytes;
+    SFNetworkHandler *rdma_handler;
+    struct ibv_pd *pd;

    bytes = sizeof(IdempotencyReceiptThreadContext) * SF_G_WORK_THREADS;
-    receipt_thread_contexts = (IdempotencyReceiptThreadContext *)
+    RECEIPT_THREAD_CONTEXTS = (IdempotencyReceiptThreadContext *)
        fc_malloc(bytes);
-    if (receipt_thread_contexts == NULL) {
+    if (RECEIPT_THREAD_CONTEXTS == NULL) {
        return ENOMEM;
    }
-    memset(receipt_thread_contexts, 0, bytes);
+    memset(RECEIPT_THREAD_CONTEXTS, 0, bytes);

+    if ((rdma_handler=sf_get_rdma_network_handler(&g_sf_context)) != NULL) {
+        if ((result=sf_alloc_rdma_pd(&g_sf_context, address_array)) != 0) {
+            return result;
+        }
+
+        TASK_PADDING_SIZE = rdma_handler->get_connection_size();
+        RDMA_INIT_CONNECTION = rdma_handler->init_connection;
+        pd = rdma_handler->pd;
+    } else {
+        TASK_PADDING_SIZE = 0;
+        RDMA_INIT_CONNECTION = NULL;
+        pd = NULL;
+    }
+
    return sf_service_init_ex2(&g_sf_context, "idemp-receipt",
            receipt_alloc_thread_extra_data, receipt_thread_loop_callback,
-            NULL, sf_proto_set_body_length, receipt_deal_task,
+            NULL, sf_proto_set_body_length, NULL, NULL, receipt_deal_task,
            receipt_task_finish_cleanup, receipt_recv_timeout_callback,
-            1000, sizeof(SFCommonProtoHeader), 0, receipt_init_task, NULL);
+            1000, sizeof(SFCommonProtoHeader), TASK_PADDING_SIZE,
+            task_arg_size, double_buffers, need_shrink_task_buffer,
+            explicit_post_recv, receipt_init_task, pd, NULL);
}

-int receipt_handler_init()
+int receipt_handler_init(FCAddressPtrArray *address_array)
{
    int result;

-    if ((result=do_init()) != 0) {
+    if ((result=do_init(address_array)) != 0) {
        return result;
    }

    sf_enable_thread_notify(true);
-    sf_set_remove_from_ready_list(false);
    fc_sleep_ms(100);
    return 0;


@@ -24,7 +24,7 @@
extern "C" {
#endif

-int receipt_handler_init();
+int receipt_handler_init(FCAddressPtrArray *address_array);
int receipt_handler_destroy();

#ifdef __cplusplus


@@ -44,7 +44,6 @@
        &client_ctx->common_cfg.net_retry_cfg.interval_mm, \
        &client_ctx->common_cfg.net_retry_cfg.network); \
    \
-    while (1) { \
        if (idempotency_enabled) { \
            req_id = idempotency_client_channel_next_seq_id( \
                    connection_params->channel); \
@@ -52,6 +51,7 @@
            req_id = 0; \
        } \
    \
+    while (1) { \
        old_channel = connection_params != NULL ? \
                connection_params->channel : NULL; \
        i = 0; \
@@ -79,7 +79,8 @@
                connection_params->channel) == 0) \
        { \
            if ((conn_result=sf_proto_rebind_idempotency_channel( \
-                    conn, connection_params->channel->id, \
+                    conn, (conn_manager)->module_name, \
+                    connection_params->channel->id, \
                    connection_params->channel->key, \
                    client_ctx->common_cfg.network_timeout)) == 0) \
            { \


@@ -0,0 +1,46 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef _IDEMPOTENCY_COMMON_TYPES_H
#define _IDEMPOTENCY_COMMON_TYPES_H
#include "fastcommon/common_define.h"
#define SF_IDEMPOTENCY_CHANNEL_ID_BITS 16
#define SF_IDEMPOTENCY_REQUEST_ID_BITS (64 - SF_IDEMPOTENCY_CHANNEL_ID_BITS)
#define SF_IDEMPOTENCY_MAX_CHANNEL_COUNT ((1 << SF_IDEMPOTENCY_CHANNEL_ID_BITS) - 1)
#define SF_IDEMPOTENCY_MAX_CHANNEL_ID SF_IDEMPOTENCY_MAX_CHANNEL_COUNT
#define SF_IDEMPOTENCY_SERVER_ID_OFFSET 48
#define SF_IDEMPOTENCY_CHANNEL_ID_OFFSET 32
#define SF_IDEMPOTENCY_NEXT_REQ_ID(server_id, channel_id, seq) \
(((int64_t)server_id) << SF_IDEMPOTENCY_SERVER_ID_OFFSET) | \
(((int64_t)channel_id) << SF_IDEMPOTENCY_CHANNEL_ID_OFFSET) | \
(int64_t)seq
#define SF_IDEMPOTENCY_EXTRACT_SERVER_ID(req_id) \
(int)((req_id >> SF_IDEMPOTENCY_SERVER_ID_OFFSET) & 0xFFFF)
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __cplusplus
}
#endif
#endif
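The macros above fix the request-id layout: server id in the top 16 bits, channel id in the next 16, and a 32-bit per-channel sequence in the low bits, which is what the new server_id/next_seq fields of the client channel feed into. A standalone, hypothetical demo (the macros are copied locally and re-parenthesized; main() and the sample numbers are illustrative only):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define SERVER_ID_OFFSET   48
#define CHANNEL_ID_OFFSET  32

/* same layout as SF_IDEMPOTENCY_NEXT_REQ_ID above */
#define NEXT_REQ_ID(server_id, channel_id, seq) \
    ((((int64_t)(server_id)) << SERVER_ID_OFFSET) | \
     (((int64_t)(channel_id)) << CHANNEL_ID_OFFSET) | (int64_t)(seq))

#define EXTRACT_SERVER_ID(req_id) \
    (int)(((req_id) >> SERVER_ID_OFFSET) & 0xFFFF)

int main(void)
{
    int64_t req_id = NEXT_REQ_ID(5, 7, 42);  /* server 5, channel 7, seq 42 */

    printf("req_id: %016"PRIx64"\n", req_id);              /* 000500070000002a */
    printf("server id: %d\n", EXTRACT_SERVER_ID(req_id));  /* 5 */
    return 0;
}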


@@ -0,0 +1,250 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <limits.h>
#include <sys/stat.h>
#include "fastcommon/shared_func.h"
#include "fastcommon/logger.h"
#include "fastcommon/fc_atomic.h"
#include "sf/sf_global.h"
#include "request_metadata.h"
static struct {
int process_interval_ms;
int master_side_timeout; //in seconds
struct {
IdempotencyRequestMetadataContext *head;
IdempotencyRequestMetadataContext *tail;
} list;
} g_request_metadata = {1000, 300, {NULL, NULL}};
#define CHECK_MASTER_METADATA(meta) \
(meta != NULL && g_current_time - (long)meta->enqueue_time > \
g_request_metadata.master_side_timeout)
static void process_master_side(IdempotencyRequestMetadataContext *ctx)
{
struct fast_mblock_chain chain;
struct fast_mblock_node *node;
chain.head = chain.tail = NULL;
PTHREAD_MUTEX_LOCK(&ctx->lock);
if (CHECK_MASTER_METADATA(ctx->list.head)) {
do {
node = fast_mblock_to_node_ptr(ctx->list.head);
if (chain.head == NULL) {
chain.head = node;
} else {
chain.tail->next = node;
}
chain.tail = node;
ctx->list.head = ctx->list.head->next;
} while (CHECK_MASTER_METADATA(ctx->list.head));
if (ctx->list.head == NULL) {
ctx->list.tail = NULL;
}
chain.tail->next = NULL;
}
if (chain.head != NULL) {
fast_mblock_batch_free(&ctx->allocator, &chain);
}
PTHREAD_MUTEX_UNLOCK(&ctx->lock);
}
#define CHECK_SLAVE_METADATA(meta, dv) \
(meta != NULL && meta->data_version <= dv)
static void process_slave_side(IdempotencyRequestMetadataContext *ctx,
const int64_t data_version)
{
struct fast_mblock_chain chain;
struct fast_mblock_node *node;
chain.head = chain.tail = NULL;
PTHREAD_MUTEX_LOCK(&ctx->lock);
if (CHECK_SLAVE_METADATA(ctx->list.head, data_version)) {
do {
node = fast_mblock_to_node_ptr(ctx->list.head);
if (chain.head == NULL) {
chain.head = node;
} else {
chain.tail->next = node;
}
chain.tail = node;
ctx->list.head = ctx->list.head->next;
} while (CHECK_SLAVE_METADATA(ctx->list.head, data_version));
if (ctx->list.head == NULL) {
ctx->list.tail = NULL;
}
chain.tail->next = NULL;
}
if (chain.head != NULL) {
fast_mblock_batch_free(&ctx->allocator, &chain);
}
PTHREAD_MUTEX_UNLOCK(&ctx->lock);
}
static void *thread_run(void *arg)
{
IdempotencyRequestMetadataContext *ctx;
int64_t data_version;
#ifdef OS_LINUX
prctl(PR_SET_NAME, "idemp-req-meta");
#endif
ctx = g_request_metadata.list.head;
while (SF_G_CONTINUE_FLAG) {
fc_sleep_ms(g_request_metadata.process_interval_ms);
if (ctx->is_master_callback.func(ctx->is_master_callback.
arg, &data_version))
{
process_master_side(ctx);
} else if (data_version > 0) {
process_slave_side(ctx, data_version);
}
ctx = ctx->next;
if (ctx == NULL) {
ctx = g_request_metadata.list.head;
}
}
return NULL;
}
int idempotency_request_metadata_init(IdempotencyRequestMetadataContext
*ctx, sf_is_master_callback is_master_callback, void *arg)
{
int result;
if ((result=fast_mblock_init_ex1(&ctx->allocator, "req-metadata-info",
sizeof(IdempotencyRequestMetadata), 8192, 0,
NULL, NULL, false)) != 0)
{
return result;
}
if ((result=init_pthread_lock(&ctx->lock)) != 0) {
return result;
}
ctx->is_master_callback.func = is_master_callback;
ctx->is_master_callback.arg = arg;
ctx->list.head = ctx->list.tail = NULL;
ctx->next = NULL;
if (g_request_metadata.list.head == NULL) {
g_request_metadata.list.head = ctx;
} else {
g_request_metadata.list.tail->next = ctx;
}
g_request_metadata.list.tail = ctx;
return 0;
}
int idempotency_request_metadata_start(const int process_interval_ms,
const int master_side_timeout)
{
pthread_t tid;
if (g_request_metadata.list.head == NULL) {
logError("file: "__FILE__", line: %d, "
"list is empty!", __LINE__);
return ENOENT;
}
if (process_interval_ms <= 0) {
logError("file: "__FILE__", line: %d, "
"invalid process interval: %d!",
__LINE__, process_interval_ms);
return EINVAL;
}
if (master_side_timeout <= 0) {
logError("file: "__FILE__", line: %d, "
"invalid master side timeout: %d!",
__LINE__, master_side_timeout);
return EINVAL;
}
g_request_metadata.process_interval_ms = process_interval_ms;
g_request_metadata.master_side_timeout = master_side_timeout;
return fc_create_thread(&tid, thread_run, NULL,
SF_G_THREAD_STACK_SIZE);
}
int idempotency_request_metadata_add(IdempotencyRequestMetadataContext
*ctx, const SFRequestMetadata *metadata, const int n)
{
IdempotencyRequestMetadata *idemp_meta;
PTHREAD_MUTEX_LOCK(&ctx->lock);
do {
if ((idemp_meta=fast_mblock_alloc_object(&ctx->allocator)) == NULL) {
break;
}
idemp_meta->req_id = metadata->req_id;
idemp_meta->data_version = metadata->data_version;
idemp_meta->n = n;
idemp_meta->enqueue_time = g_current_time;
idemp_meta->next = NULL;
if (ctx->list.head == NULL) {
ctx->list.head = idemp_meta;
} else {
ctx->list.tail->next = idemp_meta;
}
ctx->list.tail = idemp_meta;
} while (0);
PTHREAD_MUTEX_UNLOCK(&ctx->lock);
return (idemp_meta != NULL ? 0 : ENOMEM);
}
int idempotency_request_metadata_get(IdempotencyRequestMetadataContext
*ctx, const int64_t req_id, int64_t *data_version, int *n)
{
int result;
IdempotencyRequestMetadata *meta;
result = ENOENT;
PTHREAD_MUTEX_LOCK(&ctx->lock);
meta = ctx->list.head;
while (meta != NULL) {
if (req_id == meta->req_id) {
result = 0;
*data_version = meta->data_version;
if (n != NULL) {
*n = meta->n;
}
break;
}
meta = meta->next;
}
PTHREAD_MUTEX_UNLOCK(&ctx->lock);
return result;
}
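A brief usage sketch of the new request-metadata API, based only on the declarations in this patch; my_is_master(), the module-level meta_ctx variable and the sample numbers are made up for illustration, and request_metadata.h is assumed to be included:

/* slaves report their applied data version through the callback */
static bool my_is_master(void *arg, int64_t *data_version)
{
    *data_version = 100;   /* illustrative value */
    return false;          /* pretend this node is a slave */
}

static IdempotencyRequestMetadataContext meta_ctx;

static int setup_request_metadata(void)
{
    int result;
    SFRequestMetadata metadata;

    if ((result=idempotency_request_metadata_init(&meta_ctx,
                    my_is_master, NULL)) != 0)
    {
        return result;
    }

    /* 1000 ms scan interval, 300 s master-side timeout */
    if ((result=idempotency_request_metadata_start(1000, 300)) != 0) {
        return result;
    }

    metadata.req_id = 12345;
    metadata.data_version = 100;
    return idempotency_request_metadata_add(&meta_ctx, &metadata, 0);
}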


@@ -0,0 +1,66 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef _SF_IDEMPOTENCY_REQUEST_METADATA_H
#define _SF_IDEMPOTENCY_REQUEST_METADATA_H
#include "server_types.h"
typedef bool (*sf_is_master_callback)(void *arg, int64_t *data_version);
typedef struct idempotency_request_metadata {
int64_t req_id;
int64_t data_version;
int n; //integer argument
uint32_t enqueue_time;
struct idempotency_request_metadata *next;
} IdempotencyRequestMetadata;
typedef struct idempotency_request_metadata_context {
struct {
sf_is_master_callback func;
void *arg;
} is_master_callback;
struct fast_mblock_man allocator; //element: IdempotencyRequestMetadata
pthread_mutex_t lock;
struct {
IdempotencyRequestMetadata *head;
IdempotencyRequestMetadata *tail;
} list;
struct idempotency_request_metadata_context *next;
} IdempotencyRequestMetadataContext;
#ifdef __cplusplus
extern "C" {
#endif
int idempotency_request_metadata_init(IdempotencyRequestMetadataContext
*ctx, sf_is_master_callback is_master_callback, void *arg);
int idempotency_request_metadata_start(const int process_interval_ms,
const int master_side_timeout);
int idempotency_request_metadata_add(IdempotencyRequestMetadataContext
*ctx, const SFRequestMetadata *metadata, const int n);
int idempotency_request_metadata_get(IdempotencyRequestMetadataContext
*ctx, const int64_t req_id, int64_t *data_version, int *n);
#ifdef __cplusplus
}
#endif
#endif


@@ -37,12 +37,9 @@
#include "server_channel.h"
#include "server_handler.h"

-#define SF_TASK_BODY_LENGTH(task) \
-    (task->length - sizeof(SFCommonProtoHeader))
-
int sf_server_deal_setup_channel(struct fast_task_info *task,
-        int *task_type, IdempotencyChannel **channel,
-        SFResponseInfo *response)
+        int *task_type, const int server_id, IdempotencyChannel
+        **channel, SFResponseInfo *response)
{
    int result;
    SFProtoSetupChannelReq *req;
@@ -52,13 +49,13 @@ int sf_server_deal_setup_channel(struct fast_task_info *task,
    response->header.cmd = SF_SERVICE_PROTO_SETUP_CHANNEL_RESP;
    if ((result=sf_server_expect_body_length(response,
-            SF_TASK_BODY_LENGTH(task),
+            SF_RECV_BODY_LENGTH(task),
            sizeof(SFProtoSetupChannelReq))) != 0)
    {
        return result;
    }

-    req = (SFProtoSetupChannelReq *)(task->data + sizeof(SFCommonProtoHeader));
+    req = (SFProtoSetupChannelReq *)SF_PROTO_RECV_BODY(task);
    channel_id = buff2int(req->channel_id);
    key = buff2int(req->key);
    if (*channel != NULL) {
@@ -74,14 +71,13 @@ int sf_server_deal_setup_channel(struct fast_task_info *task,
                "alloc channel fail, hint channel id: %d", channel_id);
        return ENOMEM;
    }

    *task_type = SF_SERVER_TASK_TYPE_CHANNEL_HOLDER;
-    resp = (SFProtoSetupChannelResp *)(task->data +
-            sizeof(SFCommonProtoHeader));
+    resp = (SFProtoSetupChannelResp *)SF_PROTO_SEND_BODY(task);
    int2buff((*channel)->id, resp->channel_id);
    int2buff((*channel)->key, resp->key);
-    int2buff(task->size, resp->buffer_size);
+    int2buff(server_id, resp->server_id);
+    int2buff(task->send.ptr->size, resp->buffer_size);

    response->header.body_len = sizeof(SFProtoSetupChannelResp);
    return 0;
}
@@ -135,19 +131,19 @@ int sf_server_deal_report_req_receipt(struct fast_task_info *task,
    SFProtoReportReqReceiptBody *body_part;
    SFProtoReportReqReceiptBody *body_end;

+    response->header.cmd = SF_SERVICE_PROTO_REPORT_REQ_RECEIPT_RESP;
    if ((result=check_holder_channel(task_type, channel, response)) != 0) {
        return result;
    }

-    body_len = SF_TASK_BODY_LENGTH(task);
+    body_len = SF_RECV_BODY_LENGTH(task);
    if ((result=sf_server_check_min_body_length(response, body_len,
            sizeof(SFProtoReportReqReceiptHeader))) != 0)
    {
        return result;
    }

-    body_header = (SFProtoReportReqReceiptHeader *)
-        (task->data + sizeof(SFCommonProtoHeader));
+    body_header = (SFProtoReportReqReceiptHeader *)SF_PROTO_RECV_BODY(task);
    count = buff2int(body_header->count);
    calc_body_len = sizeof(SFProtoReportReqReceiptHeader) +
            sizeof(SFProtoReportReqReceiptBody) * count;
@@ -169,7 +165,6 @@ int sf_server_deal_report_req_receipt(struct fast_task_info *task,
    }

    //logInfo("receipt count: %d, success: %d", count, success);
-    response->header.cmd = SF_SERVICE_PROTO_REPORT_REQ_RECEIPT_RESP;
    return 0;
}
@@ -189,7 +184,7 @@ IdempotencyRequest *sf_server_update_prepare_and_check(
    }

    adheader = (SFProtoIdempotencyAdditionalHeader *)req->body;
-    request = (IdempotencyRequest *)fast_mblock_alloc_object(request_allocator);
+    request = fast_mblock_alloc_object(request_allocator);
    if (request == NULL) {
        *result = ENOMEM;
        return NULL;
@@ -220,7 +215,7 @@ int sf_server_deal_rebind_channel(struct fast_task_info *task,
    SFProtoRebindChannelReq *req;

    if ((result=sf_server_expect_body_length(response,
-            SF_TASK_BODY_LENGTH(task),
+            SF_RECV_BODY_LENGTH(task),
            sizeof(SFProtoRebindChannelReq))) != 0)
    {
        return result;
@@ -240,7 +235,7 @@ int sf_server_deal_rebind_channel(struct fast_task_info *task,
    }

    idempotency_channel_release(*channel, false);
-    req = (SFProtoRebindChannelReq *)(task->data + sizeof(SFCommonProtoHeader));
+    req = (SFProtoRebindChannelReq *)SF_PROTO_RECV_BODY(task);
    channel_id = buff2int(req->channel_id);
    key = buff2int(req->key);
    *channel = idempotency_channel_find_and_hold(channel_id, key, &result);


@@ -25,8 +25,8 @@ extern "C" {
#endif

int sf_server_deal_setup_channel(struct fast_task_info *task,
-        int *task_type, IdempotencyChannel **channel,
-        SFResponseInfo *response);
+        int *task_type, const int server_id, IdempotencyChannel
+        **channel, SFResponseInfo *response);

int sf_server_deal_close_channel(struct fast_task_info *task,
        int *task_type, IdempotencyChannel **channel,


@@ -19,11 +19,7 @@
#include "fastcommon/fast_mblock.h"
#include "fastcommon/fast_timer.h"
+#include "sf/idempotency/common/idempotency_types.h"

-#define SF_IDEMPOTENCY_CHANNEL_ID_BITS 16
-#define SF_IDEMPOTENCY_REQUEST_ID_BITS (64 - SF_IDEMPOTENCY_CHANNEL_ID_BITS)
-#define SF_IDEMPOTENCY_MAX_CHANNEL_COUNT ((1 << SF_IDEMPOTENCY_CHANNEL_ID_BITS) - 1)
-#define SF_IDEMPOTENCY_MAX_CHANNEL_ID SF_IDEMPOTENCY_MAX_CHANNEL_COUNT
-
#define SF_IDEMPOTENCY_DEFAULT_REQUEST_HINT_CAPACITY 1023
#define SF_IDEMPOTENCY_DEFAULT_CHANNEL_RESERVE_INTERVAL 600
@@ -61,6 +57,17 @@ typedef struct idempotency_channel {
    struct idempotency_channel *next;
} IdempotencyChannel;

+typedef struct sf_request_metadata {
+    int64_t req_id;
+    int64_t data_version;
+} SFRequestMetadata;
+
+typedef struct sf_request_metadata_array {
+    SFRequestMetadata *elts;
+    int count;
+    int alloc;
+} SFRequestMetadataArray;
+
#ifdef __cplusplus
extern "C" {
#endif


@@ -68,10 +68,6 @@ static int parse(SFBinlogIndexContext *ctx, const string_t *lines,
    const string_t *end;
    void *bindex;

-    if (row_count < 1) {
-        return EINVAL;
-    }
-
    if ((result=parse_header(lines, &record_count, &ctx->
            last_version, error_info)) != 0)
    {
@@ -140,7 +136,9 @@ static int load(SFBinlogIndexContext *ctx)
    }

    row_count = split_string_ex(&context, '\n', lines, row_count, true);
+    if (row_count > 0) {
        result = parse(ctx, lines, row_count);
+    }
    free(lines);
    free(context.str);
    return result;
@@ -175,7 +173,9 @@ static int save(SFBinlogIndexContext *ctx, const char *filename)
    int i;
    int result;

-    if ((fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0644)) < 0) {
+    if ((fd=open(filename, O_WRONLY | O_CREAT | O_TRUNC |
+            O_CLOEXEC, 0644)) < 0)
+    {
        result = errno != 0 ? errno : EIO;
        logError("file: "__FILE__", line: %d, "
                "open file %s fail, errno: %d, error info: %s",
@@ -186,9 +186,11 @@ static int save(SFBinlogIndexContext *ctx, const char *filename)
    result = 0;
    p = buff;
    bend = buff + sizeof(buff);
-    p += sprintf(p, "%d %"PRId64"\n",
-            ctx->index_array.count,
-            ctx->last_version);
+    p += fc_itoa(ctx->index_array.count, p);
+    *p++ = ' ';
+    p += fc_itoa(ctx->last_version, p);
+    *p++ = '\n';
    index = ctx->index_array.indexes;
    for (i=0; i<ctx->index_array.count; i++) {
@@ -227,7 +229,7 @@ int sf_binlog_index_save(SFBinlogIndexContext *ctx)
    int result;
    char tmp_filename[PATH_MAX];

-    snprintf(tmp_filename, sizeof(tmp_filename), "%s.tmp", ctx->filename);
+    fc_combine_two_strings(ctx->filename, "tmp", '.', tmp_filename);
    if ((result=save(ctx, tmp_filename)) != 0) {
        return result;
    }


@@ -13,18 +13,6 @@
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 */

-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/socket.h>
-#include <netinet/in.h>
-#include <arpa/inet.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <errno.h>
-#include <limits.h>
-#include <fcntl.h>
-#include <pthread.h>
#include "fastcommon/logger.h"
#include "fastcommon/sockopt.h"
#include "fastcommon/shared_func.h"
@@ -34,6 +22,8 @@
#include "sf_func.h"
#include "sf_binlog_writer.h"

+#define ERRNO_THREAD_EXIT  -1000
+
static inline void binlog_writer_set_next_version(SFBinlogWriterInfo *writer,
        const uint64_t next_version)
{
@@ -44,7 +34,8 @@ static inline void binlog_writer_set_next_version(SFBinlogWriterInfo *writer,
}

#define deal_binlog_one_record(wb) \
-    sf_file_writer_deal_buffer(&wb->writer->fw, &wb->bf, wb->version.last)
+    sf_file_writer_deal_versioned_buffer(&wb->writer->fw, \
+            &wb->bf, wb->version.last)

#define GET_WBUFFER_VERSION_COUNT(wb) \
    (((wb)->version.last - (wb)->version.first) + 1)
@@ -70,9 +61,9 @@ static int deal_record_by_version(SFBinlogWriterBuffer *wb)
    if (wb->version.first < writer->version_ctx.next) {
        logError("file: "__FILE__", line: %d, subdir_name: %s, "
                "current version: %"PRId64" is too small which "
-                "less than %"PRId64", tag: %"PRId64", buffer(%d): %.*s",
+                "less than %"PRId64", buffer(%d): %.*s",
                __LINE__, writer->fw.cfg.subdir_name, wb->version.first,
-                writer->version_ctx.next, wb->tag, wb->bf.length,
+                writer->version_ctx.next, wb->bf.length,
                wb->bf.length, wb->bf.buff);
        fast_mblock_free_object(&writer->thread->mblock, wb);
        return 0;
@@ -165,9 +156,6 @@ static inline int flush_writer_files(SFBinlogWriterThread *thread)
            return result;
        }

-        if (writer->fw.flags & SF_FILE_WRITER_FLAGS_WANT_DONE_VERSION) {
-            writer->fw.last_versions.done = writer->fw.last_versions.pending;
-        }
        writer->flush.in_queue = false;
        writer = writer->flush.next;
    }
@@ -177,9 +165,10 @@ static inline int flush_writer_files(SFBinlogWriterThread *thread)
}

static int deal_binlog_records(SFBinlogWriterThread *thread,
-        SFBinlogWriterBuffer *wb_head)
+        SFBinlogWriterBuffer *wb_head, uint32_t *last_timestamp)
{
    int result;
+    bool skip_empty_file;
    SFBinlogWriterBuffer *wbuffer;
    SFBinlogWriterBuffer *current;
@@ -187,21 +176,73 @@ static int deal_binlog_records(SFBinlogWriterThread *thread,
    do {
        current = wbuffer;
        wbuffer = wbuffer->next;
+        if (wbuffer == NULL) {
+            *last_timestamp = current->timestamp;
+        }
        switch (current->type) {
            case SF_BINLOG_BUFFER_TYPE_CHANGE_ORDER_TYPE:
-                thread->order_by = current->version.first;
+                current->writer->order_by = current->version.first;
                fast_mblock_free_object(&current->writer->
                        thread->mblock, current);
                break;
case SF_BINLOG_BUFFER_TYPE_CHANGE_PASSIVE_WRITE:
thread->passive_write = current->version.first;
fast_mblock_free_object(&current->writer->
thread->mblock, current);
break;
case SF_BINLOG_BUFFER_TYPE_CHANGE_CALL_FSYNC:
current->writer->fw.cfg.call_fsync = current->version.first;
fast_mblock_free_object(&current->writer->
thread->mblock, current);
break;
case SF_BINLOG_BUFFER_TYPE_ROTATE_FILE:
flush_writer_files(thread);
skip_empty_file = current->version.first;
if (!(skip_empty_file && current->writer->fw.file.size == 0)) {
if ((result=sf_file_writer_set_binlog_write_index(&current->
writer->fw, current->writer->fw.binlog.
last_index + 1)) != 0)
{
return result;
}
}
fast_mblock_free_object(&current->writer->
thread->mblock, current);
break;
case SF_BINLOG_BUFFER_TYPE_FLUSH_FILE:
if ((result=flush_writer_files(thread)) != 0) {
return result;
}
fast_mblock_free_object(&current->writer->
thread->mblock, current);
break;
case SF_BINLOG_BUFFER_TYPE_SET_WRITE_INDEX:
if ((result=sf_file_writer_set_binlog_write_index(&current->
writer->fw, current->version.first)) != 0)
{
return result;
}
fast_mblock_free_object(&current->writer->
thread->mblock, current);
break;
case SF_BINLOG_BUFFER_TYPE_NOTIFY_EXIT:
flush_writer_files(thread);
fast_mblock_free_object(&current->writer->
thread->mblock, current);
return ERRNO_THREAD_EXIT;
            case SF_BINLOG_BUFFER_TYPE_SET_NEXT_VERSION:
-                if (thread->order_by != SF_BINLOG_THREAD_TYPE_ORDER_BY_VERSION) {
+                if (current->writer->order_by !=
+                        SF_BINLOG_WRITER_TYPE_ORDER_BY_VERSION &&
+                        current->writer->thread->order_mode !=
+                        SF_BINLOG_THREAD_ORDER_MODE_VARY)
+                {
                    logWarning("file: "__FILE__", line: %d, "
-                            "subdir_name: %s, invalid order by: %d != %d, "
-                            "maybe some mistake happen", __LINE__,
-                            current->writer->fw.cfg.subdir_name, thread->order_by,
-                            SF_BINLOG_THREAD_TYPE_ORDER_BY_VERSION);
+                            "subdir_name: %s, order by: %d != %d, "
+                            "maybe some mistake happen?", __LINE__,
+                            current->writer->fw.cfg.subdir_name,
+                            current->writer->order_by,
+                            SF_BINLOG_WRITER_TYPE_ORDER_BY_VERSION);
                }

                if (current->writer->version_ctx.ring.waiting_count != 0) {
@@ -216,8 +257,10 @@ static int deal_binlog_records(SFBinlogWriterThread *thread,
                            __LINE__, current->writer->fw.cfg.subdir_name,
                            current->version.first);

-                if (current->writer->version_ctx.next !=
-                        current->version.first)
+                if ((current->writer->order_by ==
+                        SF_BINLOG_WRITER_TYPE_ORDER_BY_NONE) ||
+                        (current->writer->version_ctx.next !=
+                         current->version.first))
                {
                    binlog_writer_set_next_version(current->writer,
                            current->version.first);
@@ -226,12 +269,13 @@ static int deal_binlog_records(SFBinlogWriterThread *thread,
                fast_mblock_free_object(&current->writer->
                        thread->mblock, current);
                break;
            default:
                current->writer->fw.total_count++;
                add_to_flush_writer_queue(thread, current->writer);
-                if (thread->order_by == SF_BINLOG_THREAD_TYPE_ORDER_BY_VERSION) {
+                if (current->writer->order_by ==
+                        SF_BINLOG_WRITER_TYPE_ORDER_BY_VERSION)
+                {
                    /* NOTE: current maybe be released in the deal function */
                    if ((result=deal_record_by_version(current)) != 0) {
                        return result;
@@ -248,19 +292,31 @@ static int deal_binlog_records(SFBinlogWriterThread *thread,
        }
    } while (wbuffer != NULL);

+    if (thread->passive_write) {
+        return 0;
+    } else {
        return flush_writer_files(thread);
+    }
}

void sf_binlog_writer_finish(SFBinlogWriterInfo *writer)
{
    SFBinlogWriterBuffer *wb_head;
+    uint32_t last_timestamp;
    int count;

-    if (writer->fw.file.name != NULL) {
-        fc_queue_terminate(&writer->thread->queue);
+    if (writer->fw.file.name.str != NULL) {
+        while (writer->thread->running && !fc_queue_empty(
+                    &writer->thread->queue))
+        {
+            fc_sleep_ms(10);
+        }
+        if (writer->thread->running) {
+            sf_binlog_writer_notify_exit(writer);
+        }

        count = 0;
-        while (writer->thread->running && ++count < 300) {
+        while (writer->thread->running && ++count < 500) {
            fc_sleep_ms(10);
        }
@@ -273,11 +329,12 @@ void sf_binlog_writer_finish(SFBinlogWriterInfo *writer)
        wb_head = (SFBinlogWriterBuffer *)fc_queue_try_pop_all(
                &writer->thread->queue);
        if (wb_head != NULL) {
-            deal_binlog_records(writer->thread, wb_head);
+            last_timestamp = 0;
+            deal_binlog_records(writer->thread, wb_head, &last_timestamp);
        }

-        free(writer->fw.file.name);
-        writer->fw.file.name = NULL;
+        free(writer->fw.file.name.str);
+        writer->fw.file.name.str = NULL;
    }

    if (writer->fw.file.fd >= 0) {
@@ -290,18 +347,22 @@ static void *binlog_writer_func(void *arg)
{
    SFBinlogWriterThread *thread;
    SFBinlogWriterBuffer *wb_head;
+    uint32_t last_record_time;
+    uint32_t current_timestamp;
+    uint32_t last_timestamp;
+    int result;

    thread = (SFBinlogWriterThread *)arg;
#ifdef OS_LINUX
    {
        char thread_name[64];
-        snprintf(thread_name, sizeof(thread_name),
-                "%s-writer", thread->name);
+        fc_combine_two_strings(thread->name, "writer", '-', thread_name);
        prctl(PR_SET_NAME, thread_name);
    }
#endif

+    last_record_time = current_timestamp = last_timestamp = 0;
    thread->running = true;
    while (SF_G_CONTINUE_FLAG) {
        wb_head = (SFBinlogWriterBuffer *)fc_queue_pop_all(&thread->queue);
@@ -309,12 +370,42 @@ static void *binlog_writer_func(void *arg)
            continue;
        }

-        if (deal_binlog_records(thread, wb_head) != 0) {
+        if ((result=deal_binlog_records(thread, wb_head,
+                &last_record_time)) != 0)
+        {
+            if (result != ERRNO_THREAD_EXIT) {
                logCrit("file: "__FILE__", line: %d, "
                        "deal_binlog_records fail, "
                        "program exit!", __LINE__);
                sf_terminate_myself();
            }
+            break;
+        }
if (fc_queue_empty(&thread->queue)) {
current_timestamp = 0;
} else {
current_timestamp = last_record_time;
}
if ((current_timestamp == 0 && last_timestamp != 0) ||
(current_timestamp > last_timestamp))
{
last_timestamp = current_timestamp;
FC_ATOMIC_SET(thread->flow_ctrol.last_timestamp,
current_timestamp);
PTHREAD_MUTEX_LOCK(&thread->flow_ctrol.lcp.lock);
if (thread->flow_ctrol.waiting_count > 0) {
pthread_cond_broadcast(&thread->flow_ctrol.lcp.cond);
}
PTHREAD_MUTEX_UNLOCK(&thread->flow_ctrol.lcp.lock);
}
if (thread->write_interval_ms > 0 &&
last_record_time == g_current_time)
{
fc_sleep_ms(thread->write_interval_ms);
}
    }

    thread->running = false;
@@ -341,19 +432,34 @@ static int binlog_wbuffer_alloc_init(void *element, void *args)
    return 0;
}

-int sf_binlog_writer_init_normal(SFBinlogWriterInfo *writer,
-        const char *data_path, const char *subdir_name,
-        const int buffer_size)
+static void binlog_wbuffer_destroy_func(void *element, void *args)
{
-    writer->flush.in_queue = false;
-    return sf_file_writer_init_normal(&writer->fw,
-            data_path, subdir_name, buffer_size);
+    SFBinlogWriterBuffer *wbuffer;
+
+    wbuffer = (SFBinlogWriterBuffer *)element;
+    if (wbuffer->bf.buff != NULL) {
+        free(wbuffer->bf.buff);
+    }
}

-int sf_binlog_writer_init_by_version(SFBinlogWriterInfo *writer,
+int sf_binlog_writer_init_normal_ex(SFBinlogWriterInfo *writer,
        const char *data_path, const char *subdir_name,
+        const char *file_prefix, const int max_record_size,
+        const int buffer_size, const int64_t file_rotate_size,
+        const bool call_fsync)
+{
+    memset(writer, 0, sizeof(*writer));
+    writer->order_by = SF_BINLOG_WRITER_TYPE_ORDER_BY_NONE;
+    return sf_file_writer_init(&writer->fw, data_path, subdir_name,
+            file_prefix, max_record_size, buffer_size,
+            file_rotate_size, call_fsync);
+}
+
+int sf_binlog_writer_init_by_version_ex(SFBinlogWriterInfo *writer,
+        const char *data_path, const char *subdir_name,
+        const char *file_prefix, const int max_record_size,
        const uint64_t next_version, const int buffer_size,
-        const int ring_size)
+        const int ring_size, const int64_t file_rotate_size,
+        const bool call_fsync)
{
    int bytes;
@@ -367,36 +473,49 @@ int sf_binlog_writer_init_by_version(SFBinlogWriterInfo *writer,
    writer->version_ctx.ring.waiting_count = 0;
    writer->version_ctx.ring.max_waitings = 0;
    writer->version_ctx.change_count = 0;
+    writer->order_by = SF_BINLOG_WRITER_TYPE_ORDER_BY_VERSION;
    binlog_writer_set_next_version(writer, next_version);
-    return sf_binlog_writer_init_normal(writer,
-            data_path, subdir_name, buffer_size);
+    writer->flush.in_queue = false;
+    return sf_file_writer_init(&writer->fw, data_path, subdir_name,
+            file_prefix, max_record_size, buffer_size,
+            file_rotate_size, call_fsync);
}

int sf_binlog_writer_init_thread_ex(SFBinlogWriterThread *thread,
        const char *name, SFBinlogWriterInfo *writer, const short order_mode,
-        const short order_by, const int max_record_size,
-        const int writer_count, const bool use_fixed_buffer_size)
+        const int write_interval_ms, const int max_delay,
+        const int max_record_size, const bool use_fixed_buffer_size,
+        const bool passive_write)
{
    const int alloc_elements_once = 1024;
+    const int64_t alloc_elements_limit = 0;
+    const int prealloc_trunk_count = 0;
+    int result;
    int element_size;
    pthread_t tid;
-    int result;
+    struct fast_mblock_object_callbacks callbacks;

-    snprintf(thread->name, sizeof(thread->name), "%s", name);
+    fc_safe_strcpy(thread->name, name);
    thread->order_mode = order_mode;
-    thread->order_by = order_by;
    thread->use_fixed_buffer_size = use_fixed_buffer_size;
+    thread->passive_write = passive_write;
+    thread->write_interval_ms = write_interval_ms;
+    thread->flow_ctrol.max_delay = max_delay;
    writer->fw.cfg.max_record_size = max_record_size;
    writer->thread = thread;

+    callbacks.init_func = binlog_wbuffer_alloc_init;
+    callbacks.args = writer;
    element_size = sizeof(SFBinlogWriterBuffer);
    if (use_fixed_buffer_size) {
        element_size += max_record_size;
+        callbacks.destroy_func = NULL;
+    } else {
+        callbacks.destroy_func = binlog_wbuffer_destroy_func;
    }
-    if ((result=fast_mblock_init_ex1(&thread->mblock, "binlog-wbuffer",
-            element_size, alloc_elements_once, 0,
-            binlog_wbuffer_alloc_init, writer, true)) != 0)
+    if ((result=fast_mblock_init_ex2(&thread->mblock, "binlog-wbuffer",
+            element_size, alloc_elements_once, alloc_elements_limit,
+            prealloc_trunk_count, &callbacks, true, NULL)) != 0)
    {
        return result;
    }
@@ -407,6 +526,12 @@ int sf_binlog_writer_init_thread_ex(SFBinlogWriterThread *thread,
        return result;
    }

+    thread->flow_ctrol.last_timestamp = 0;
+    thread->flow_ctrol.waiting_count = 0;
+    if ((result=init_pthread_lock_cond_pair(&thread->flow_ctrol.lcp)) != 0) {
+        return result;
+    }
+
    thread->flush_writers.head = thread->flush_writers.tail = NULL;
    return fc_create_thread(&tid, binlog_writer_func, thread,
            SF_G_THREAD_STACK_SIZE);
@@ -417,12 +542,12 @@ int sf_binlog_writer_change_order_by(SFBinlogWriterInfo *writer,
{
    SFBinlogWriterBuffer *buffer;

-    if (writer->thread->order_by == order_by) {
+    if (writer->order_by == order_by) {
        return 0;
    }

-    if (!(order_by == SF_BINLOG_THREAD_TYPE_ORDER_BY_NONE ||
-          order_by == SF_BINLOG_THREAD_TYPE_ORDER_BY_VERSION))
+    if (!(order_by == SF_BINLOG_WRITER_TYPE_ORDER_BY_NONE ||
+          order_by == SF_BINLOG_WRITER_TYPE_ORDER_BY_VERSION))
    {
        logError("file: "__FILE__", line: %d, "
                "invalid order by: %d!", __LINE__, order_by);
@@ -437,27 +562,147 @@ int sf_binlog_writer_change_order_by(SFBinlogWriterInfo *writer,
        return EINVAL;
    }

+    if (order_by == SF_BINLOG_WRITER_TYPE_ORDER_BY_VERSION) {
+        if (writer->version_ctx.ring.slots == NULL) {
+            logError("file: "__FILE__", line: %d, "
+                    "the writer is NOT versioned writer, can't "
+                    "set order by to %d!", __LINE__, order_by);
+            return EINVAL;
+        }
+    }
+
    if ((buffer=sf_binlog_writer_alloc_versioned_buffer_ex(writer, order_by,
            order_by, SF_BINLOG_BUFFER_TYPE_CHANGE_ORDER_TYPE)) == NULL)
    {
        return ENOMEM;
    }

-    fc_queue_push(&writer->thread->queue, buffer);
+    sf_push_to_binlog_write_queue(writer, buffer);
    return 0;
}
static inline int sf_binlog_writer_push_directive(SFBinlogWriterInfo *writer,
const int buffer_type, const int64_t version)
{
SFBinlogWriterBuffer *buffer;
if ((buffer=sf_binlog_writer_alloc_versioned_buffer_ex(writer,
version, version, buffer_type)) == NULL)
{
return ENOMEM;
}
sf_push_to_binlog_write_queue(writer, buffer);
return 0;
}
int sf_binlog_writer_change_passive_write(SFBinlogWriterInfo *writer,
const bool passive_write)
{
return sf_binlog_writer_push_directive(writer,
SF_BINLOG_BUFFER_TYPE_CHANGE_PASSIVE_WRITE,
passive_write);
}
int sf_binlog_writer_change_call_fsync(SFBinlogWriterInfo *writer,
const bool call_fsync)
{
return sf_binlog_writer_push_directive(writer,
SF_BINLOG_BUFFER_TYPE_CHANGE_CALL_FSYNC,
call_fsync);
}
int sf_binlog_writer_change_next_version(SFBinlogWriterInfo *writer,
const int64_t next_version)
{
return sf_binlog_writer_push_directive(writer,
SF_BINLOG_BUFFER_TYPE_SET_NEXT_VERSION,
next_version);
}

int sf_binlog_writer_change_write_index(SFBinlogWriterInfo *writer,
const int write_index)
{
return sf_binlog_writer_push_directive(writer,
SF_BINLOG_BUFFER_TYPE_SET_WRITE_INDEX, write_index);
}
int sf_binlog_writer_rotate_file_ex(SFBinlogWriterInfo *writer,
const bool skip_empty_file)
{
return sf_binlog_writer_push_directive(writer,
SF_BINLOG_BUFFER_TYPE_ROTATE_FILE,
skip_empty_file ? 1 : 0);
}
int sf_binlog_writer_flush_file(SFBinlogWriterInfo *writer)
{
return sf_binlog_writer_push_directive(writer,
SF_BINLOG_BUFFER_TYPE_FLUSH_FILE, 0);
}
int sf_binlog_writer_notify_exit(SFBinlogWriterInfo *writer)
{
return sf_binlog_writer_push_directive(writer,
SF_BINLOG_BUFFER_TYPE_NOTIFY_EXIT, 0);
}
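The directive helpers above all funnel control messages through sf_binlog_writer_push_directive, so they are serialized on the same queue as ordinary write buffers. As a rough illustration (not code from this repository), a caller that wants to force a file switch and then make sure the new file is flushed might chain them like this; the demo_ function name and error handling are assumptions:

static int demo_rotate_and_flush(SFBinlogWriterInfo *writer)
{
    int result;

    /* ask the writer thread to start a new binlog file,
       skipping rotation when the current file is empty */
    if ((result=sf_binlog_writer_rotate_file_ex(writer, true)) != 0) {
        return result;
    }

    /* then request an explicit flush of the current file */
    return sf_binlog_writer_flush_file(writer);
}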
void sf_push_to_binlog_write_queue(SFBinlogWriterInfo *writer,
SFBinlogWriterBuffer *buffer)
{
time_t current_time;
int64_t last_timestamp;
current_time = g_current_time;
last_timestamp = FC_ATOMIC_GET(writer->thread->flow_ctrol.last_timestamp);
if ((last_timestamp > 0 && current_time - last_timestamp > writer->
thread->flow_ctrol.max_delay) && !(writer->order_by ==
SF_BINLOG_WRITER_TYPE_ORDER_BY_VERSION && buffer->
version.first - writer->version_ctx.next < 128))
{
time_t last_log_timestamp;
int time_used;
int log_level;
PTHREAD_MUTEX_LOCK(&writer->thread->flow_ctrol.lcp.lock);
writer->thread->flow_ctrol.waiting_count++;
last_timestamp = FC_ATOMIC_GET(writer->thread->
flow_ctrol.last_timestamp);
while ((last_timestamp > 0 && current_time - last_timestamp > writer->
thread->flow_ctrol.max_delay) && !(writer->order_by ==
SF_BINLOG_WRITER_TYPE_ORDER_BY_VERSION && buffer->
version.first - writer->version_ctx.next < 128))
{
pthread_cond_wait(&writer->thread->flow_ctrol.lcp.cond,
&writer->thread->flow_ctrol.lcp.lock);
last_timestamp = FC_ATOMIC_GET(writer->thread->
flow_ctrol.last_timestamp);
}
writer->thread->flow_ctrol.waiting_count--;
PTHREAD_MUTEX_UNLOCK(&writer->thread->flow_ctrol.lcp.lock);
time_used = g_current_time - current_time;
if (time_used > 0) {
last_log_timestamp = FC_ATOMIC_GET(
LAST_BINLOG_WRITER_LOG_TIMESTAMP);
if (g_current_time != last_log_timestamp &&
__sync_bool_compare_and_swap(
&LAST_BINLOG_WRITER_LOG_TIMESTAMP,
last_log_timestamp, g_current_time))
{
if (time_used <= writer->thread->flow_ctrol.max_delay) {
log_level = LOG_DEBUG;
} else {
log_level = LOG_WARNING;
}
log_it_ex(&g_log_context, log_level, "file: "__FILE__", line: %d, "
"subdir_name: %s, max_delay: %d s, flow ctrol waiting "
"time: %d s", __LINE__, writer->fw.cfg.subdir_name,
writer->thread->flow_ctrol.max_delay, time_used);
}
}
}

buffer->timestamp = g_current_time;
fc_queue_push(&writer->thread->queue, buffer);
}
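From the producer side the flow-controlled queue is used in the usual way: allocate a buffer from the writer's thread, fill it, stamp the version, and push. A condensed sketch follows (the record contents and version values are invented, and the BufferInfo layout with buff/length fields is assumed from fastcommon):

static int demo_append_record(SFBinlogWriterInfo *writer,
        const int64_t version, const char *line, const int len)
{
    SFBinlogWriterBuffer *buffer;

    if ((buffer=sf_binlog_writer_alloc_one_version_buffer(
                    writer, version)) == NULL)
    {
        return ENOMEM;
    }

    memcpy(buffer->bf.buff, line, len);
    buffer->bf.length = len;

    /* may block here when the writer thread lags behind max_delay */
    sf_push_to_binlog_write_queue(writer, buffer);
    return 0;
}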

View File

@ -19,18 +19,25 @@
#define _SF_BINLOG_WRITER_H_

#include "fastcommon/fc_queue.h"
#include "fastcommon/fc_atomic.h"
#include "sf_types.h"
#include "sf_file_writer.h"

#define SF_BINLOG_THREAD_ORDER_MODE_FIXED 0
#define SF_BINLOG_THREAD_ORDER_MODE_VARY 1

#define SF_BINLOG_WRITER_TYPE_ORDER_BY_NONE 0
#define SF_BINLOG_WRITER_TYPE_ORDER_BY_VERSION 1

#define SF_BINLOG_BUFFER_TYPE_WRITE_TO_FILE 0 //default type, must be 0
#define SF_BINLOG_BUFFER_TYPE_SET_NEXT_VERSION 1
#define SF_BINLOG_BUFFER_TYPE_CHANGE_ORDER_TYPE 2
#define SF_BINLOG_BUFFER_TYPE_CHANGE_PASSIVE_WRITE 3
#define SF_BINLOG_BUFFER_TYPE_CHANGE_CALL_FSYNC 4
#define SF_BINLOG_BUFFER_TYPE_SET_WRITE_INDEX 5
#define SF_BINLOG_BUFFER_TYPE_ROTATE_FILE 6
#define SF_BINLOG_BUFFER_TYPE_NOTIFY_EXIT 7
#define SF_BINLOG_BUFFER_TYPE_FLUSH_FILE 8
#define SF_BINLOG_BUFFER_SET_VERSION(buffer, ver) \
(buffer)->version.first = (buffer)->version.last = ver
@ -40,8 +47,8 @@ struct sf_binlog_writer_info;
typedef struct sf_binlog_writer_buffer {
SFVersionRange version;
BufferInfo bf;
int type;
uint32_t timestamp; //for flow ctrol
struct sf_binlog_writer_info *writer;
struct sf_binlog_writer_buffer *next;
} SFBinlogWriterBuffer;
@ -63,8 +70,15 @@ typedef struct binlog_writer_thread {
char name[64];
volatile bool running;
bool use_fixed_buffer_size;
bool passive_write;
char order_mode;
int write_interval_ms;
struct {
int max_delay; //in seconds
volatile uint32_t last_timestamp;
int waiting_count;
pthread_lock_cond_pair_t lcp;
} flow_ctrol;
struct {
struct sf_binlog_writer_info *head;
struct sf_binlog_writer_info *tail;
@ -76,11 +90,12 @@ typedef struct sf_binlog_writer_info {
struct {
SFBinlogWriterBufferRing ring;
volatile int64_t next;
int64_t change_count; //version change count
} version_ctx;
SFBinlogWriterThread *thread;
short order_by;
struct {
bool in_queue;
struct sf_binlog_writer_info *next;
@ -96,58 +111,172 @@ typedef struct sf_binlog_writer_context {
extern "C" { extern "C" {
#endif #endif
int sf_binlog_writer_init_normal_ex(SFBinlogWriterInfo *writer,
const char *data_path, const char *subdir_name,
const char *file_prefix, const int max_record_size,
const int buffer_size, const int64_t file_rotate_size,
const bool call_fsync);

int sf_binlog_writer_init_by_version_ex(SFBinlogWriterInfo *writer,
const char *data_path, const char *subdir_name,
const char *file_prefix, const int max_record_size,
const uint64_t next_version, const int buffer_size,
const int ring_size, const int64_t file_rotate_size,
const bool call_fsync);

int sf_binlog_writer_init_thread_ex(SFBinlogWriterThread *thread,
const char *name, SFBinlogWriterInfo *writer, const short order_mode,
const int write_interval_ms, const int max_delay,
const int max_record_size, const bool use_fixed_buffer_size,
const bool passive_write);
#define sf_binlog_writer_init_normal(writer, data_path, \
subdir_name, max_record_size, buffer_size) \
sf_binlog_writer_init_normal_ex(writer, data_path, subdir_name, \
SF_BINLOG_FILE_PREFIX_STR, max_record_size, buffer_size, \
SF_BINLOG_DEFAULT_ROTATE_SIZE, true)
#define sf_binlog_writer_init_by_version(writer, data_path, subdir_name, \
max_record_size, next_version, buffer_size, ring_size) \
sf_binlog_writer_init_by_version_ex(writer, data_path, subdir_name, \
SF_BINLOG_FILE_PREFIX_STR, max_record_size, next_version, \
buffer_size, ring_size, SF_BINLOG_DEFAULT_ROTATE_SIZE, true)
#define sf_binlog_writer_init_thread(thread, name, \
writer, write_interval_ms, max_delay, max_record_size) \
sf_binlog_writer_init_thread_ex(thread, name, writer, \
SF_BINLOG_THREAD_ORDER_MODE_FIXED, write_interval_ms, \
max_delay, max_record_size, true, false)
static inline int sf_binlog_writer_init_ex(SFBinlogWriterContext *context,
const char *data_path, const char *subdir_name,
const char *file_prefix, const int buffer_size,
const int write_interval_ms, const int max_delay,
const int max_record_size, const bool call_fsync)
{
int result;

if ((result=sf_binlog_writer_init_normal_ex(&context->writer, data_path,
subdir_name, file_prefix, max_record_size, buffer_size,
SF_BINLOG_DEFAULT_ROTATE_SIZE, call_fsync)) != 0)
{
return result;
}
return sf_binlog_writer_init_thread(&context->thread, subdir_name,
&context->writer, write_interval_ms, max_delay, max_record_size);
}
#define sf_binlog_writer_init(context, data_path, subdir_name, \
buffer_size, write_interval_ms, max_delay, max_record_size) \
sf_binlog_writer_init_ex(context, data_path, subdir_name, \
SF_BINLOG_FILE_PREFIX_STR, buffer_size, write_interval_ms, \
max_delay, max_record_size, true)
void sf_binlog_writer_finish(SFBinlogWriterInfo *writer);
static inline void sf_binlog_writer_destroy_writer(
SFBinlogWriterInfo *writer)
{
sf_file_writer_destroy(&writer->fw);
if (writer->version_ctx.ring.slots != NULL) {
free(writer->version_ctx.ring.slots);
writer->version_ctx.ring.slots = NULL;
}
}
static inline void sf_binlog_writer_destroy_thread(
SFBinlogWriterThread *thread)
{
fast_mblock_destroy(&thread->mblock);
fc_queue_destroy(&thread->queue);
}
static inline void sf_binlog_writer_destroy(
SFBinlogWriterContext *context)
{
sf_binlog_writer_finish(&context->writer);
sf_binlog_writer_destroy_writer(&context->writer);
sf_binlog_writer_destroy_thread(&context->thread);
}
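For orientation, a minimal lifecycle built only from the declarations above might look as follows. The directory name, buffer size and intervals are made-up values, and error handling is reduced to returning the status code:

static int demo_open_binlog(SFBinlogWriterContext *ctx)
{
    /* hypothetical parameters: 64 KB buffer, wake the writer every 100 ms,
       flow-control producers after a 10 second backlog, 256-byte records */
    return sf_binlog_writer_init(ctx, "/tmp/demo-data", "demo",
            64 * 1024, 100, 10, 256);
}

static void demo_close_binlog(SFBinlogWriterContext *ctx)
{
    sf_binlog_writer_destroy(ctx);  /* finish, then free writer and thread */
}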
int sf_binlog_writer_change_order_by(SFBinlogWriterInfo *writer,
const short order_by);
int sf_binlog_writer_change_passive_write(SFBinlogWriterInfo *writer,
const bool passive_write);
int sf_binlog_writer_change_call_fsync(SFBinlogWriterInfo *writer,
const bool call_fsync);
int sf_binlog_writer_change_next_version(SFBinlogWriterInfo *writer,
const int64_t next_version);
static inline int64_t sf_binlog_writer_get_next_version(
SFBinlogWriterInfo *writer)
{
return writer->version_ctx.next;
}
static inline int sf_binlog_writer_get_waiting_count(
SFBinlogWriterInfo *writer)
{
return writer->version_ctx.ring.waiting_count;
}
static inline int sf_binlog_writer_get_thread_waiting_count(
SFBinlogWriterThread *thread)
{
int waiting_count;
PTHREAD_MUTEX_LOCK(&thread->flow_ctrol.lcp.lock);
waiting_count = thread->flow_ctrol.waiting_count;
PTHREAD_MUTEX_UNLOCK(&thread->flow_ctrol.lcp.lock);
return waiting_count;
}
int sf_binlog_writer_rotate_file_ex(SFBinlogWriterInfo *writer,
const bool skip_empty_file);
static inline int sf_binlog_writer_rotate_file(SFBinlogWriterInfo *writer)
{
const bool skip_empty_file = false;
return sf_binlog_writer_rotate_file_ex(writer, skip_empty_file);
}
int sf_binlog_writer_flush_file(SFBinlogWriterInfo *writer);
int sf_binlog_writer_change_write_index(SFBinlogWriterInfo *writer,
const int write_index);
int sf_binlog_writer_notify_exit(SFBinlogWriterInfo *writer);
#define sf_binlog_writer_set_flags(writer, flags) \
sf_file_writer_set_flags(&(writer)->fw, flags)
#define sf_binlog_writer_set_write_done_callback(writer, callback, args) \
sf_file_writer_set_write_done_callback(&(writer)->fw, callback, args)
#define sf_binlog_writer_get_last_version_ex(writer, log_level) \
sf_file_writer_get_last_version_ex(&(writer)->fw, log_level)
#define sf_binlog_writer_get_last_version(writer) \
sf_file_writer_get_last_version(&(writer)->fw)

#define sf_binlog_writer_get_last_version_silence(writer) \
sf_file_writer_get_last_version_silence(&(writer)->fw)
#define sf_binlog_get_indexes(writer, start_index, last_index) \
sf_file_writer_get_indexes(&(writer)->fw, start_index, last_index)
#define sf_binlog_get_start_index(writer) \
sf_file_writer_get_start_index(&(writer)->fw)
#define sf_binlog_get_last_index(writer) \
sf_file_writer_get_last_index(&(writer)->fw)
#define sf_binlog_get_current_write_index(writer) \
sf_file_writer_get_current_write_index(&(writer)->fw)

#define sf_binlog_get_current_write_position(writer, position) \
sf_file_writer_get_current_position(&(writer)->fw, position)
@ -155,7 +284,14 @@ void sf_binlog_writer_finish(SFBinlogWriterInfo *writer);
static inline SFBinlogWriterBuffer *sf_binlog_writer_alloc_buffer(
SFBinlogWriterThread *thread)
{
SFBinlogWriterBuffer *buffer;

if ((buffer=(SFBinlogWriterBuffer *)fast_mblock_alloc_object(
&thread->mblock)) != NULL)
{
buffer->type = SF_BINLOG_BUFFER_TYPE_WRITE_TO_FILE;
}
return buffer;
}
#define sf_binlog_writer_alloc_one_version_buffer(writer, version) \
@ -172,6 +308,7 @@ static inline SFBinlogWriterBuffer *sf_binlog_writer_alloc_versioned_buffer_ex(
const int64_t last_version, const int type)
{
SFBinlogWriterBuffer *buffer;

buffer = (SFBinlogWriterBuffer *)fast_mblock_alloc_object(
&writer->thread->mblock);
if (buffer != NULL) {
@ -183,26 +320,50 @@ static inline SFBinlogWriterBuffer *sf_binlog_writer_alloc_versioned_buffer_ex(
return buffer;
}
void sf_push_to_binlog_write_queue(SFBinlogWriterInfo *writer,
SFBinlogWriterBuffer *buffer);

#define sf_binlog_writer_get_filepath(data_path, subdir_name, filepath, size) \
sf_file_writer_get_filepath(data_path, subdir_name, filepath, size)

#define sf_binlog_writer_get_filename_ex(data_path, subdir_name, \
file_prefix, binlog_index, filename, size) \
sf_file_writer_get_filename_ex(data_path, subdir_name, \
file_prefix, binlog_index, filename, size)

#define sf_binlog_writer_get_filename(data_path, \
subdir_name, binlog_index, filename, size) \
sf_file_writer_get_filename(data_path, subdir_name, \
binlog_index, filename, size)

#define sf_binlog_writer_get_index_filename(data_path, \
subdir_name, filename, size) \
sf_file_writer_get_index_filename(data_path, \
subdir_name, filename, size)

#define sf_binlog_writer_get_binlog_indexes(data_path, \
subdir_name, start_index, last_index) \
sf_file_writer_get_binlog_indexes(data_path, \
subdir_name, start_index, last_index)

#define sf_binlog_writer_get_binlog_start_index(data_path, \
subdir_name, start_index) \
sf_file_writer_get_binlog_start_index(data_path, \
subdir_name, start_index)

#define sf_binlog_writer_get_binlog_last_index(data_path, \
subdir_name, last_index) \
sf_file_writer_get_binlog_last_index(data_path, \
subdir_name, last_index)
#define sf_binlog_set_indexes(writer, start_index, last_index) \
sf_file_writer_set_indexes(&(writer)->fw, start_index, last_index)
#define sf_binlog_writer_set_binlog_start_index(writer, start_index) \
sf_file_writer_set_binlog_start_index(&(writer)->fw, start_index)
#define sf_binlog_writer_set_binlog_write_index(writer, last_index) \
sf_file_writer_set_binlog_write_index(&(writer)->fw, last_index)
#define sf_binlog_writer_get_last_lines(data_path, subdir_name, \
current_write_index, buff, buff_size, count, length) \

106
src/sf_buffered_writer.h Normal file
View File

@ -0,0 +1,106 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
//sf_buffered_writer.h
#ifndef _SF_BUFFERED_WRITER_H_
#define _SF_BUFFERED_WRITER_H_
#include "sf_types.h"
#include "sf_func.h"
typedef struct {
int fd;
const char *filename;
SFBinlogBuffer buffer;
} SFBufferedWriter;
#define sf_buffered_writer_init(writer, filename) \
sf_buffered_writer_init_ex(writer, filename, 1024 * 1024)
#define SF_BUFFERED_WRITER_LENGTH(bw) \
SF_BINLOG_BUFFER_PRODUCER_DATA_LENGTH((bw).buffer)
#define SF_BUFFERED_WRITER_REMAIN(bw) \
SF_BINLOG_BUFFER_PRODUCER_BUFF_REMAIN((bw).buffer)
#ifdef __cplusplus
extern "C" {
#endif
static inline int sf_buffered_writer_init_ex(SFBufferedWriter *writer,
const char *filename, const int buffer_size)
{
int result;
writer->filename = filename;
writer->fd = open(filename, O_WRONLY | O_CREAT |
O_TRUNC | O_CLOEXEC, 0644);
if (writer->fd < 0) {
result = errno != 0 ? errno : EIO;
logError("file: "__FILE__", line: %d, "
"open file %s fail, errno: %d, error info: %s",
__LINE__, filename, result, STRERROR(result));
return result;
}
if ((result=sf_binlog_buffer_init(&writer->buffer, buffer_size)) != 0) {
return result;
}
return 0;
}
static inline int sf_buffered_writer_save(SFBufferedWriter *writer)
{
int result;
int length;
length = writer->buffer.data_end - writer->buffer.buff;
if (fc_safe_write(writer->fd, writer->buffer.buff, length) != length) {
result = errno != 0 ? errno : EIO;
logError("file: "__FILE__", line: %d, "
"write to file %s fail, errno: %d, error info: %s",
__LINE__, writer->filename, result, STRERROR(result));
return result;
}
writer->buffer.data_end = writer->buffer.buff;
return 0;
}
static inline int sf_buffered_writer_destroy(SFBufferedWriter *writer)
{
int result;
if (writer->fd >= 0) {
if (fsync(writer->fd) != 0) {
result = errno != 0 ? errno : EIO;
logError("file: "__FILE__", line: %d, "
"fsync to file %s fail, errno: %d, error info: %s",
__LINE__, writer->filename, result, STRERROR(result));
return result;
}
close(writer->fd);
writer->fd = -1;
}
sf_binlog_buffer_destroy(&writer->buffer);
return 0;
}
#ifdef __cplusplus
}
#endif
#endif
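A sketch of how this buffered writer might be used, assuming the SFBinlogBuffer fields (buff, data_end and the remaining-space macro) behave as the macros above suggest; the record format, record-size bound and file path are invented:

static int demo_dump_records(const char *path)
{
    SFBufferedWriter writer;
    int result;
    int i;
    int len;

    if ((result=sf_buffered_writer_init(&writer, path)) != 0) {
        return result;
    }

    for (i=0; i<1000; i++) {
        /* flush to disk when less than one record could still fit */
        if (SF_BUFFERED_WRITER_REMAIN(writer) < 64 &&
                (result=sf_buffered_writer_save(&writer)) != 0)
        {
            break;
        }
        len = sprintf(writer.buffer.data_end, "record %d\n", i);
        writer.buffer.data_end += len;
    }

    if (result == 0) {
        result = sf_buffered_writer_save(&writer);  /* write the tail */
    }
    sf_buffered_writer_destroy(&writer);
    return result;
}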

View File

@ -23,7 +23,7 @@ static int calc_cluster_config_sign(SFClusterConfig *cluster)
FastBuffer buffer;
int result;

if ((result=fast_buffer_init1(&buffer, 1024)) != 0) {
return result;
}
fc_server_to_config_string(&cluster->server_cfg, &buffer);
@ -116,19 +116,19 @@ int sf_load_cluster_config_by_file(SFClusterConfig *cluster,
return 0;
}
int sf_load_cluster_config_ex1(SFClusterConfig *cluster,
IniFullContext *ini_ctx, const char *cluster_config_item_name,
const int default_port, char *full_cluster_filename, const int size)
{
const bool share_between_groups = true;
char *cluster_config_filename;

cluster_config_filename = iniGetStrValue(ini_ctx->section_name,
cluster_config_item_name, ini_ctx->context);
if (cluster_config_filename == NULL || *cluster_config_filename == '\0') {
logError("file: "__FILE__", line: %d, "
"config file: %s, item \"%s\" not exist or empty",
__LINE__, ini_ctx->filename, cluster_config_item_name);
return ENOENT;
}

View File

@ -23,9 +23,29 @@
extern "C" { extern "C" {
#endif #endif
int sf_load_cluster_config_ex1(SFClusterConfig *cluster,
IniFullContext *ini_ctx, const char *cluster_config_item_name,
const int default_port, char *full_cluster_filename, const int size);
static inline int sf_load_cluster_config_ex(SFClusterConfig *cluster,
IniFullContext *ini_ctx, const int default_port,
char *full_cluster_filename, const int size)
{
const char *cluster_config_item_name = "cluster_config_filename";
return sf_load_cluster_config_ex1(cluster, ini_ctx,
cluster_config_item_name, default_port,
full_cluster_filename, PATH_MAX);
}
static inline int sf_load_cluster_config1(SFClusterConfig *cluster,
IniFullContext *ini_ctx, const char *cluster_config_item_name,
const int default_port)
{
char full_cluster_filename[PATH_MAX];
return sf_load_cluster_config_ex1(cluster, ini_ctx,
cluster_config_item_name, default_port,
full_cluster_filename, PATH_MAX);
}
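The *_ex1 variant lets callers look the cluster file up under a non-default item name. A hypothetical caller, with an invented item name and an arbitrary default port, could use the wrapper above like this:

static int demo_load_cluster(SFClusterConfig *cluster, IniFullContext *ini_ctx)
{
    /* "my_cluster_filename" and port 11211 are placeholder values */
    return sf_load_cluster_config1(cluster, ini_ctx,
            "my_cluster_filename", 11211);
}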
static inline int sf_load_cluster_config(SFClusterConfig *cluster,
IniFullContext *ini_ctx, const int default_port)

View File

@ -25,10 +25,10 @@
#include "fastcommon/logger.h" #include "fastcommon/logger.h"
#include "sf_configs.h" #include "sf_configs.h"
#define DEFAULT_RETRY_MAX_INTERVAL_MS 5000 #define DEFAULT_RETRY_MAX_INTERVAL_MS 3000
#define DEFAULT_CONNECT_RETRY_TIMES 10 #define DEFAULT_CONNECT_RETRY_TIMES 200
#define DEFAULT_CONNECT_RETRY_INTERVAL_MS 100 #define DEFAULT_CONNECT_RETRY_INTERVAL_MS 100
#define DEFAULT_NETWORK_RETRY_TIMES 10 #define DEFAULT_NETWORK_RETRY_TIMES 200
#define DEFAULT_NETWORK_RETRY_INTERVAL_MS 100 #define DEFAULT_NETWORK_RETRY_INTERVAL_MS 100
int sf_load_net_retry_config(SFNetRetryConfig *net_retry_cfg, int sf_load_net_retry_config(SFNetRetryConfig *net_retry_cfg,
@ -94,13 +94,14 @@ void sf_net_retry_config_to_string(SFNetRetryConfig *net_retry_cfg,
net_retry_cfg->network.interval_ms);
}
int sf_load_read_rule_config_ex(SFDataReadRule *rule,
IniFullContext *ini_ctx, const SFDataReadRule def_rule)
{
char *read_rule;

read_rule = iniGetStrValueEx(ini_ctx->section_name,
"read_rule", ini_ctx->context, true);
if (read_rule == NULL) {
*rule = def_rule;
} else if (strncasecmp(read_rule, "any", 3) == 0) {
*rule = sf_data_read_rule_any_available;
@ -110,8 +111,62 @@ void sf_load_read_rule_config_ex(SFDataReadRule *rule,
*rule = sf_data_read_rule_master_only;
} else {
logError("file: "__FILE__", line: %d, "
"config file: %s, unknown read_rule: %s",
__LINE__, ini_ctx->filename, read_rule);
return EINVAL;
}
return 0;
}
int sf_load_election_quorum_config_ex(SFElectionQuorum *quorum,
IniFullContext *ini_ctx, const SFElectionQuorum def_quorum)
{
char *str;
str = iniGetStrValue(ini_ctx->section_name,
"quorum", ini_ctx->context);
if (str == NULL) {
*quorum = def_quorum;
} else if (strncasecmp(str, "auto", 4) == 0) {
*quorum = sf_election_quorum_auto;
} else if (strncasecmp(str, "any", 3) == 0) {
*quorum = sf_election_quorum_any;
} else if (strncasecmp(str, "majority", 8) == 0) {
*quorum = sf_election_quorum_majority;
} else {
logError("file: "__FILE__", line: %d, "
"config file: %s, unkown quorum: %s",
__LINE__, ini_ctx->filename, str);
return EINVAL;
}
return 0;
}
int sf_load_replication_quorum_config_ex(SFReplicationQuorum *quorum,
IniFullContext *ini_ctx, const SFReplicationQuorum def_quorum)
{
char *str;
str = iniGetStrValue(ini_ctx->section_name,
"quorum", ini_ctx->context);
if (str == NULL) {
*quorum = def_quorum;
} else if (strncasecmp(str, "auto", 4) == 0) {
*quorum = sf_replication_quorum_auto;
} else if (strncasecmp(str, "any", 3) == 0) {
*quorum = sf_replication_quorum_any;
} else if (strncasecmp(str, "majority", 8) == 0) {
*quorum = sf_replication_quorum_majority;
} else if (strncasecmp(str, "smart", 5) == 0) {
*quorum = sf_replication_quorum_smart;
} else {
logError("file: "__FILE__", line: %d, "
"config file: %s, unkown quorum: %s",
__LINE__, ini_ctx->filename, str);
return EINVAL;
}
return 0;
}
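Taken together, the loaders above accept plain keywords from the ini file. A config fragment and the corresponding load call might look like the sketch below; the section name in the comment and the chosen default are assumptions:

/*
 * [replication]
 * quorum = majority        # any | auto | majority | smart
 */
static int demo_load_quorum(IniFullContext *ini_ctx,
        SFReplicationQuorum *quorum)
{
    return sf_load_replication_quorum_config_ex(quorum, ini_ctx,
            sf_replication_quorum_auto);
}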

View File

@ -67,7 +67,7 @@ static inline int sf_calc_next_retry_interval(SFNetRetryIntervalContext *ctx)
return ctx->interval_ms;
}
int sf_load_read_rule_config_ex(SFDataReadRule *rule,
IniFullContext *ini_ctx, const SFDataReadRule def_rule);
static inline const char *sf_get_read_rule_caption(
@ -85,9 +85,113 @@ static inline const char *sf_get_read_rule_caption(
}
}
int sf_load_election_quorum_config_ex(SFElectionQuorum *quorum,
IniFullContext *ini_ctx, const SFElectionQuorum def_quorum);
static inline const char *sf_get_election_quorum_caption(
const SFElectionQuorum quorum)
{
switch (quorum) {
case sf_election_quorum_auto:
return "auto";
case sf_election_quorum_any:
return "any";
case sf_election_quorum_majority:
return "majority";
default:
return "unknown";
}
}
static inline bool sf_election_quorum_check(const SFElectionQuorum quorum,
const bool vote_node_enabled, const int total_count,
const int active_count)
{
switch (quorum) {
case sf_election_quorum_any:
return active_count > 0;
case sf_election_quorum_auto:
if (total_count % 2 == 0 && !vote_node_enabled) {
return active_count > 0; //same as sf_election_quorum_any
}
//continue
case sf_election_quorum_majority:
if (active_count == total_count) {
return true;
} else {
return active_count > total_count / 2;
}
}
}
int sf_load_replication_quorum_config_ex(SFReplicationQuorum *quorum,
IniFullContext *ini_ctx, const SFReplicationQuorum def_quorum);
static inline const char *sf_get_replication_quorum_caption(
const SFReplicationQuorum quorum)
{
switch (quorum) {
case sf_replication_quorum_auto:
return "auto";
case sf_replication_quorum_any:
return "any";
case sf_replication_quorum_majority:
return "majority";
case sf_replication_quorum_smart:
return "smart";
default:
return "unknown";
}
}
#define SF_REPLICATION_QUORUM_MAJORITY(server_count, success_count) \
((success_count == server_count) || (success_count > server_count / 2))
static inline bool sf_replication_quorum_check(const SFReplicationQuorum
quorum, const int server_count, const int success_count)
{
switch (quorum) {
case sf_replication_quorum_any:
return true;
case sf_replication_quorum_auto:
if (server_count % 2 == 0) {
return true; //same as sf_replication_quorum_any
}
//continue
case sf_replication_quorum_smart:
case sf_replication_quorum_majority:
return SF_REPLICATION_QUORUM_MAJORITY(
server_count, success_count);
}
}
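To make the fall-through cases above concrete, here is a small worked check (the counts are invented): with four servers and no vote node, the "auto" election quorum degrades to "any", while the replication majority rule still requires three of four acknowledgements.

#include <assert.h>

static void demo_quorum_checks(void)
{
    /* election: even server count, vote node disabled -> behaves like "any" */
    assert(sf_election_quorum_check(sf_election_quorum_auto, false, 4, 1));

    /* replication: a majority of 4 means at least 3 successes */
    assert(SF_REPLICATION_QUORUM_MAJORITY(4, 3));
    assert(!SF_REPLICATION_QUORUM_MAJORITY(4, 2));
}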
#define sf_load_read_rule_config(rule, ini_ctx) \
sf_load_read_rule_config_ex(rule, ini_ctx, sf_data_read_rule_master_only)
#define sf_load_election_quorum_config(quorum, ini_ctx) \
sf_load_election_quorum_config_ex(quorum, ini_ctx, sf_election_quorum_auto)
#define sf_load_replication_quorum_config(quorum, ini_ctx) \
sf_load_replication_quorum_config_ex(quorum, ini_ctx, \
sf_replication_quorum_auto)
#define SF_ELECTION_QUORUM_NEED_REQUEST_VOTE_NODE(quorum, \
vote_node_enabled, server_count, active_count) \
(active_count < server_count && vote_node_enabled && \
quorum != sf_election_quorum_any && server_count % 2 == 0)
#define SF_ELECTION_QUORUM_NEED_CHECK_VOTE_NODE(quorum, \
vote_node_enabled, server_count) \
(vote_node_enabled && quorum != sf_election_quorum_any \
&& server_count % 2 == 0)
#define SF_REPLICATION_QUORUM_NEED_MAJORITY(quorum, server_count) \
(server_count > 1 && (quorum != sf_replication_quorum_any))
#define SF_REPLICATION_QUORUM_NEED_DETECT(quorum, server_count) \
(server_count % 2 == 0 && (quorum == sf_replication_quorum_smart || \
quorum == sf_replication_quorum_auto))
#define SF_NET_RETRY_FINISHED(retry_times, counter, result) \
!((SF_IS_RETRIABLE_ERROR(result) && ((retry_times > 0 && \
counter <= retry_times) || (retry_times < 0))))

View File

@ -35,13 +35,14 @@ static int get_group_servers(SFConnectionManager *cm,
SFCMConnGroupEntry *group);

static ConnectionInfo *get_spec_connection(SFConnectionManager *cm,
const ConnectionInfo *target, const bool shared, int *err_no)
{
return conn_pool_get_connection_ex(&cm->cpool,
target, cm->module_name, shared, err_no);
}

static ConnectionInfo *make_connection(SFConnectionManager *cm,
FCAddressPtrArray *addr_array, const bool shared, int *err_no)
{
FCAddressInfo **current;
FCAddressInfo **addr;
@ -55,7 +56,7 @@ static ConnectionInfo *make_connection(SFConnectionManager *cm,
current = addr_array->addrs + addr_array->index;
if ((conn=get_spec_connection(cm, &(*current)->conn,
shared, err_no)) != NULL)
{
return conn;
}
@ -71,7 +72,7 @@ static ConnectionInfo *make_connection(SFConnectionManager *cm,
}
if ((conn=get_spec_connection(cm, &(*addr)->conn,
shared, err_no)) != NULL)
{
addr_array->index = addr - addr_array->addrs;
return conn;
@ -82,22 +83,22 @@ static ConnectionInfo *make_connection(SFConnectionManager *cm,
}

static ConnectionInfo *get_server_connection(SFConnectionManager *cm,
FCServerInfo *server, const bool shared, int *err_no)
{
FCAddressPtrArray *addr_array;
ConnectionInfo *conn;

addr_array = &server->group_addrs[cm->server_group_index].address_array;
if ((conn=make_connection(cm, addr_array, shared, err_no)) == NULL) {
logError("file: "__FILE__", line: %d, "
"%s server id: %d, address count: %d, get_server_connection fail",
__LINE__, cm->module_name, server->id, addr_array->count);
}

return conn;
}

static ConnectionInfo *get_connection(SFConnectionManager *cm,
const int group_index, const bool shared, int *err_no)
{
SFCMServerArray *server_array;
ConnectionInfo *conn;
@ -109,7 +110,7 @@ static ConnectionInfo *get_connection(SFConnectionManager *cm,
server_hash_code = rand();
server_index = server_hash_code % server_array->count;
if ((conn=make_connection(cm, server_array->servers[server_index].
addr_array, shared, err_no)) != NULL)
{
return conn;
}
@ -121,7 +122,7 @@ static ConnectionInfo *get_connection(SFConnectionManager *cm,
}
if ((conn=make_connection(cm, server_array->servers[i].
addr_array, shared, err_no)) != NULL)
{
return conn;
}
@ -129,8 +130,8 @@ static ConnectionInfo *get_connection(SFConnectionManager *cm,
}
logError("file: "__FILE__", line: %d, "
"%s data group index: %d, get_connection fail, "
"configured server count: %d", __LINE__, cm->module_name,
group_index, server_array->count);
return NULL;
}
@ -144,33 +145,6 @@ static inline void set_connection_params(ConnectionInfo *conn,
cparam->cm.old_alives = old_alives;
}
static inline ConnectionInfo *make_master_connection(SFConnectionManager *cm,
SFCMConnGroupEntry *group, int *err_no)
{
SFCMServerEntry *master;
ConnectionInfo *conn;
master = (SFCMServerEntry *)FC_ATOMIC_GET(group->master);
if (master != NULL) {
if ((conn=make_connection(cm, master->addr_array,
err_no)) != NULL)
{
if (cm->common_cfg->read_rule == sf_data_read_rule_master_only) {
set_connection_params(conn, master, NULL);
} else {
SFCMServerPtrArray *alives;
alives = (SFCMServerPtrArray *)FC_ATOMIC_GET(group->alives);
set_connection_params(conn, master, alives);
}
return conn;
}
__sync_bool_compare_and_swap(&group->master, master, NULL);
}
return NULL;
}
static inline int push_to_detect_queue(SFConnectionManager *cm,
SFCMConnGroupEntry *group, SFCMServerPtrArray *alives)
{
@ -202,7 +176,7 @@ static inline bool alive_array_cas(SFConnectionManager *cm,
push_to_detect_queue(cm, group, new_alives);
fast_mblock_delay_free_object(&cm->sptr_array_allocator, old_alives,
(cm->common_cfg->connect_timeout + cm->common_cfg->
network_timeout) * group->all.count * 4);
return true;
} else {
fast_mblock_free_object(&cm->sptr_array_allocator, new_alives);
@ -250,14 +224,42 @@ static int remove_from_alives(SFConnectionManager *cm,
return 0;
}
static inline ConnectionInfo *make_master_connection(SFConnectionManager *cm,
SFCMConnGroupEntry *group, const bool shared, int *err_no)
{
SFCMServerEntry *master;
ConnectionInfo *conn;
SFCMServerPtrArray *alives;
master = (SFCMServerEntry *)FC_ATOMIC_GET(group->master);
if (master != NULL) {
if ((conn=make_connection(cm, master->addr_array,
shared, err_no)) != NULL)
{
alives = (SFCMServerPtrArray *)FC_ATOMIC_GET(group->alives);
set_connection_params(conn, master, alives);
return conn;
} else {
alives = (SFCMServerPtrArray *)FC_ATOMIC_GET(group->alives);
if (alives != NULL) {
remove_from_alives(cm, group, alives, master);
}
__sync_bool_compare_and_swap(&group->master, master, NULL);
}
}
*err_no = SF_RETRIABLE_ERROR_NO_SERVER;
return NULL;
}
static inline ConnectionInfo *make_readable_connection(SFConnectionManager *cm,
SFCMConnGroupEntry *group, SFCMServerPtrArray *alives,
const int index, const bool shared, int *err_no)
{
ConnectionInfo *conn;

if ((conn=make_connection(cm, alives->servers[index]->
addr_array, shared, err_no)) == NULL)
{
remove_from_alives(cm, group, alives, alives->servers[index]);
} else {
@ -268,72 +270,82 @@ static inline ConnectionInfo *make_readable_connection(SFConnectionManager *cm,
}

static ConnectionInfo *get_master_connection(SFConnectionManager *cm,
const int group_index, const bool shared, int *err_no)
{
SFCMConnGroupEntry *group;
ConnectionInfo *conn;
SFNetRetryIntervalContext net_retry_ctx;
int retry_count;

group = cm->groups.entries + group_index;
sf_init_net_retry_interval_context(&net_retry_ctx,
&cm->common_cfg->net_retry_cfg.interval_mm,
&cm->common_cfg->net_retry_cfg.connect);
retry_count = 0;
while (1) {
if ((conn=make_master_connection(cm, group, shared, err_no)) != NULL) {
return conn;
}
/*
logInfo("file: "__FILE__", line: %d, "
"retry_count: %d, interval_ms: %d, data group id: %d, "
"master: %p, alive count: %d, all count: %d", __LINE__,
retry_count, net_retry_ctx.interval_ms, group->id,
FC_ATOMIC_GET(group->master), ((SFCMServerPtrArray *)
FC_ATOMIC_GET(group->alives))->count, group->all.count);
*/
*err_no = get_group_servers(cm, group);
if (*err_no == 0) {
*err_no = SF_RETRIABLE_ERROR_NO_SERVER; //for try again
}
SF_NET_RETRY_CHECK_AND_SLEEP(net_retry_ctx,
cm->common_cfg->net_retry_cfg.
connect.times, ++retry_count, *err_no);
}

logError("file: "__FILE__", line: %d, "
"%s get_master_connection fail, group id: %d, "
"retry count: %d, errno: %d", __LINE__,
cm->module_name, group->id, retry_count, *err_no);
return NULL;
}
static ConnectionInfo *get_readable_connection(SFConnectionManager *cm,
const int group_index, const bool shared, int *err_no)
{
SFCMConnGroupEntry *group;
SFCMServerPtrArray *alives;
ConnectionInfo *conn;
SFNetRetryIntervalContext net_retry_ctx;
uint32_t index;
int retry_count;

group = cm->groups.entries + group_index;
if ((cm->common_cfg->read_rule == sf_data_read_rule_master_only) ||
(group->all.count == 1))
{
return get_master_connection(cm, group_index, shared, err_no);
}

sf_init_net_retry_interval_context(&net_retry_ctx,
&cm->common_cfg->net_retry_cfg.interval_mm,
&cm->common_cfg->net_retry_cfg.connect);
retry_count = 0;
while (1) {
alives = (SFCMServerPtrArray *)FC_ATOMIC_GET(group->alives);
if (alives->count > 0) {
index = rand() % alives->count;
if ((conn=make_readable_connection(cm, group, alives,
index, shared, err_no)) != NULL)
{
return conn;
}
}
if (cm->common_cfg->read_rule == sf_data_read_rule_slave_first) {
if ((conn=make_master_connection(cm, group, shared, err_no)) != NULL) {
return conn;
}
}
@ -344,12 +356,12 @@ static ConnectionInfo *get_readable_connection(SFConnectionManager *cm,
}
SF_NET_RETRY_CHECK_AND_SLEEP(net_retry_ctx,
cm->common_cfg->net_retry_cfg.
connect.times, ++retry_count, *err_no);
}

logError("file: "__FILE__", line: %d, "
"%s get_readable_connection fail, retry count: %d, errno: %d",
__LINE__, cm->module_name, retry_count, *err_no);
return NULL;
}
@ -376,12 +388,11 @@ static void close_connection(SFConnectionManager *cm, ConnectionInfo *conn)
if (cparam->cm.sentry != NULL) {
server = cparam->cm.sentry;
group = cm->groups.entries + server->group_index;
if (cparam->cm.old_alives != NULL) {
remove_from_alives(cm, group, cparam->cm.old_alives, server);
cparam->cm.old_alives = NULL;
}
__sync_bool_compare_and_swap(&group->master, server, NULL);
cparam->cm.sentry = NULL;
}
@ -389,11 +400,12 @@ static void close_connection(SFConnectionManager *cm, ConnectionInfo *conn)
}

static ConnectionInfo *get_leader_connection(SFConnectionManager *cm,
FCServerInfo *server, const bool shared, int *err_no)
{
ConnectionInfo *conn;
SFClientServerEntry leader;
SFNetRetryIntervalContext net_retry_ctx;
char formatted_ip[FORMATTED_IP_SIZE];
int i;
int connect_fails;
@ -404,14 +416,14 @@ static ConnectionInfo *get_leader_connection(SFConnectionManager *cm,
while (1) {
do {
if ((conn=get_server_connection(cm, server,
shared, err_no)) == NULL)
{
connect_fails++;
break;
}
if ((*err_no=sf_proto_get_leader(conn, cm->module_name, cm->
common_cfg->network_timeout, &leader)) != 0)
{
close_connection(cm, conn);
break;
@ -421,9 +433,27 @@ static ConnectionInfo *get_leader_connection(SFConnectionManager *cm,
return conn;
}
release_connection(cm, conn);

if ((conn=get_spec_connection(cm, &leader.conn,
shared, err_no)) == NULL)
{
if (cm->server_cfg != NULL) {
FCServerInfo *ls;
if ((ls=fc_server_get_by_id(cm->server_cfg,
leader.server_id)) != NULL)
{
if (ls->group_addrs[cm->server_group_index].
address_array.count > 1)
{
if ((conn=get_server_connection(cm, ls,
shared, err_no)) != NULL)
{
return conn;
}
}
}
}
break;
}
@ -439,11 +469,13 @@ static ConnectionInfo *get_leader_connection(SFConnectionManager *cm,
connect.times, ++i, *err_no);
}
format_ip_address(server->group_addrs[cm->server_group_index].
address_array.addrs[0]->conn.ip_addr, formatted_ip);
logWarning("file: "__FILE__", line: %d, " logWarning("file: "__FILE__", line: %d, "
"get_leader_connection fail, server id: %d, %s:%u, errno: %d", "%s get_leader_connection fail, server id: %d, %s:%u, errno: %d",
__LINE__, server->id, server->group_addrs[cm->server_group_index]. __LINE__, cm->module_name, server->id, formatted_ip,
address_array.addrs[0]->conn.ip_addr, server->group_addrs[cm-> server->group_addrs[cm->server_group_index].address_array.
server_group_index].address_array.addrs[0]->conn.port, *err_no); addrs[0]->conn.port, *err_no);
return NULL; return NULL;
} }
@ -463,7 +495,7 @@ int sf_cm_validate_connection_callback(ConnectionInfo *conn, void *args)
if ((result=sf_active_test(conn, &response, cm->common_cfg->
network_timeout)) != 0)
{
sf_log_network_error(&response, conn, cm->module_name, result);
}

return result;
@ -489,21 +521,52 @@ int sf_connection_manager_init_ex(SFConnectionManager *cm,
const int group_count, const int server_group_index,
const int server_count, const int max_count_per_entry,
const int max_idle_time, fc_connection_callback_func
connect_done_callback, void *args, FCServerConfig *server_cfg,
const bool bg_thread_enabled)
{
struct {
ConnectionExtraParams holder;
ConnectionExtraParams *ptr;
} extra_params;
FCServerGroupInfo *server_group;
int htable_capacity;
int result;

if (server_count <= 4) {
htable_capacity = 16;
} else if (server_count <= 16) {
htable_capacity = 64;
} else if (server_count <= 32) {
htable_capacity = 128;
} else if (server_count < 64) {
htable_capacity = 256;
} else {
htable_capacity = 4 * server_count;
}
if ((server_group=fc_server_get_group_by_index(server_cfg,
server_group_index)) == NULL)
{
return ENOENT;
}
if (server_group->comm_type == fc_comm_type_sock) {
extra_params.ptr = NULL;
} else {
if ((result=conn_pool_set_rdma_extra_params(&extra_params.holder,
server_cfg, server_group_index)) != 0)
{
return result;
}
extra_params.ptr = &extra_params.holder;
} }
if ((result=conn_pool_init_ex1(&cm->cpool, common_cfg->connect_timeout,
max_count_per_entry, max_idle_time, htable_capacity,
connect_done_callback, args,
sf_cm_validate_connection_callback, cm,
sizeof(SFConnectionParameters),
extra_params.ptr)) != 0)
{
return result;
}
@ -523,9 +586,11 @@ int sf_connection_manager_init_ex(SFConnectionManager *cm,
cm->server_group_index = server_group_index;
cm->module_name = module_name;
cm->common_cfg = common_cfg;
cm->server_cfg = server_cfg;
cm->alive_detect.bg_thread_enabled = bg_thread_enabled;
cm->max_servers_per_group = 0;
cm->extra = NULL;
cm->exclude_server_id = 0;

cm->ops.get_connection = get_connection;
cm->ops.get_server_connection = get_server_connection;
@ -687,8 +752,8 @@ static int do_get_group_servers(SFConnectionManager *cm,
sarray.alloc = MAX_GROUP_SERVER_COUNT;
sarray.count = 0;
sarray.servers = fixed_servers;
if ((result=sf_proto_get_group_servers(conn, cm->module_name, cm->
common_cfg->network_timeout, group->id, &sarray)) != 0)
{
return result;
}
@ -713,6 +778,7 @@ static int do_get_group_servers(SFConnectionManager *cm,
static int get_group_servers_by_active(SFConnectionManager *cm,
SFCMConnGroupEntry *group)
{
const bool shared = true;
SFCMServerPtrArray *alives;
SFCMServerEntry **server;
SFCMServerEntry **end;
@ -728,7 +794,7 @@ static int get_group_servers_by_active(SFConnectionManager *cm,
end = alives->servers + alives->count;
for (server=alives->servers; server<end; server++) {
if ((conn=make_connection(cm, (*server)->addr_array,
shared, &result)) == NULL)
{
continue;
}
@ -746,6 +812,7 @@ static int get_group_servers_by_active(SFConnectionManager *cm,
static int get_group_servers_by_all(SFConnectionManager *cm,
SFCMConnGroupEntry *group)
{
const bool shared = true;
SFCMServerEntry *server;
SFCMServerEntry *end;
ConnectionInfo *conn;
@ -758,8 +825,12 @@ static int get_group_servers_by_all(SFConnectionManager *cm,
end = group->all.servers + group->all.count;
for (server=group->all.servers; server<end; server++) {
if (server->id == cm->exclude_server_id) {
continue;
}
if ((conn=make_connection(cm, server->addr_array,
shared, &result)) == NULL)
{
continue;
}
@ -822,7 +893,7 @@ static void *connection_manager_thread_func(void *arg)
#endif

cm = (SFConnectionManager *)arg;
logDebug("file: "__FILE__", line: %d, "
"[%s] connection manager thread start",
__LINE__, cm->module_name);

View File

@ -26,16 +26,17 @@
struct sf_connection_manager;

typedef ConnectionInfo *(*sf_get_connection_func)(
struct sf_connection_manager *cm, const int group_index,
const bool shared, int *err_no);

typedef ConnectionInfo *(*sf_get_server_connection_func)(
struct sf_connection_manager *cm,
FCServerInfo *server, const bool shared, int *err_no);

typedef ConnectionInfo *(*sf_get_spec_connection_func)(
struct sf_connection_manager *cm,
const ConnectionInfo *target,
const bool shared, int *err_no);

typedef void (*sf_release_connection_func)(
struct sf_connection_manager *cm, ConnectionInfo *conn);
@ -103,8 +104,9 @@ typedef struct sf_cm_operations {
} SFCMOperations;

typedef struct sf_connection_manager {
int exclude_server_id; //for server side
uint16_t max_servers_per_group;
uint8_t server_group_index;
struct {
bool bg_thread_enabled;
struct common_blocked_queue queue;
@ -115,6 +117,7 @@ typedef struct sf_connection_manager {
ConnectionPool cpool;
struct fast_mblock_man sptr_array_allocator; //element: SFCMServerPtrArray
SFCMOperations ops;
FCServerConfig *server_cfg;
void *extra; //for simple connection manager
} SFConnectionManager;
@ -123,7 +126,8 @@ int sf_connection_manager_init_ex(SFConnectionManager *cm,
const int group_count, const int server_group_index,
const int server_count, const int max_count_per_entry,
const int max_idle_time, fc_connection_callback_func
connect_done_callback, void *args, FCServerConfig *server_cfg,
const bool bg_thread_enabled);

static inline int sf_connection_manager_init(SFConnectionManager *cm,
const char *module_name, const SFClientCommonConfig *common_cfg,
@ -136,7 +140,13 @@ static inline int sf_connection_manager_init(SFConnectionManager *cm,
return sf_connection_manager_init_ex(cm, module_name,
common_cfg, group_count, server_group_index,
server_count, max_count_per_entry, max_idle_time,
connect_done_callback, args, NULL, bg_thread_enabled);
}
static inline void sf_connection_manager_set_exclude_server_id(
SFConnectionManager *cm, const int exclude_server_id)
{
cm->exclude_server_id = exclude_server_id;
} }
int sf_connection_manager_add(SFConnectionManager *cm, const int group_id, int sf_connection_manager_add(SFConnectionManager *cm, const int group_id,
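A minimal sketch of the updated calling convention, assuming a hypothetical module name "demo" and already-loaded common_cfg / server_cfg structures; only the parameter order comes from the declarations above:

    /* sketch: pass the FCServerConfig at init time, then optionally
     * exclude the local server id on the server side */
    result = sf_connection_manager_init_ex(&cm, "demo", &common_cfg,
            group_count, server_group_index, server_count,
            max_count_per_entry, max_idle_time,
            NULL /* connect_done_callback */, NULL /* args */,
            &server_cfg, true /* bg_thread_enabled */);
    if (result == 0) {
        sf_connection_manager_set_exclude_server_id(&cm, my_server_id);
    }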

View File

@ -20,8 +20,8 @@
#include "fastcommon/sockopt.h" #include "fastcommon/sockopt.h"
#define SF_DEFAULT_CONNECT_TIMEOUT 2 #define SF_DEFAULT_CONNECT_TIMEOUT 10
#define SF_DEFAULT_NETWORK_TIMEOUT 10 #define SF_DEFAULT_NETWORK_TIMEOUT 60
#define SF_DEF_THREAD_STACK_SIZE (256 * 1024) #define SF_DEF_THREAD_STACK_SIZE (256 * 1024)
#define SF_MIN_THREAD_STACK_SIZE (64 * 1024) #define SF_MIN_THREAD_STACK_SIZE (64 * 1024)
@ -45,8 +45,9 @@
#define SF_NIO_TASK_STAGE_FETCH(task) task->nio_stages.current #define SF_NIO_TASK_STAGE_FETCH(task) task->nio_stages.current
#define SF_SESSION_ERROR_NOT_EXIST 9992 #define SF_SESSION_ERROR_NOT_EXIST 9992
#define SF_CLUSTER_ERROR_NOT_LEADER 9996 #define SF_CLUSTER_ERROR_NOT_LEADER 9995
#define SF_CLUSTER_ERROR_LEADER_VERSION_INCONSISTENT 9997 #define SF_CLUSTER_ERROR_LEADER_VERSION_INCONSISTENT 9996
#define SF_CLUSTER_ERROR_BINLOG_MISSED 9997
#define SF_CLUSTER_ERROR_BINLOG_INCONSISTENT 9998 #define SF_CLUSTER_ERROR_BINLOG_INCONSISTENT 9998
#define SF_CLUSTER_ERROR_LEADER_INCONSISTENT 9999 #define SF_CLUSTER_ERROR_LEADER_INCONSISTENT 9999
#define SF_CLUSTER_ERROR_MASTER_INCONSISTENT SF_CLUSTER_ERROR_LEADER_INCONSISTENT #define SF_CLUSTER_ERROR_MASTER_INCONSISTENT SF_CLUSTER_ERROR_LEADER_INCONSISTENT
@ -64,9 +65,13 @@
#define SF_ERROR_EBUSY 8816 #define SF_ERROR_EBUSY 8816
#define SF_ERROR_EINVAL 8822 #define SF_ERROR_EINVAL 8822
#define SF_ERROR_EAGAIN 8835 #define SF_ERROR_EAGAIN 8835
#define SF_ERROR_EINPROGRESS 8836
#define SF_ERROR_EOVERFLOW 8884 #define SF_ERROR_EOVERFLOW 8884
#define SF_ERROR_EOPNOTSUPP 8895 #define SF_ERROR_EOPNOTSUPP 8895
#define SF_ERROR_ENOLINK 8867
#define SF_ERROR_ENODATA 8861 #define SF_ERROR_ENODATA 8861
#define SF_ERROR_ENOTEMPTY 8839
#define SF_ERROR_ELOOP 8840
#define SF_FORCE_CLOSE_CONNECTION_ERROR_MIN SF_RETRIABLE_ERROR_NOT_MASTER #define SF_FORCE_CLOSE_CONNECTION_ERROR_MIN SF_RETRIABLE_ERROR_NOT_MASTER
#define SF_FORCE_CLOSE_CONNECTION_ERROR_MAX SF_RETRIABLE_ERROR_MAX #define SF_FORCE_CLOSE_CONNECTION_ERROR_MAX SF_RETRIABLE_ERROR_MAX
@ -91,6 +96,8 @@
#define SF_BINLOG_SOURCE_USER 'U' //by user call #define SF_BINLOG_SOURCE_USER 'U' //by user call
#define SF_BINLOG_SOURCE_REPLAY 'R' //by binlog replay #define SF_BINLOG_SOURCE_REPLAY 'R' //by binlog replay
#define SF_LOG_SCHEDULE_ENTRIES_COUNT 3
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
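Note that SF_CLUSTER_ERROR_NOT_LEADER and SF_CLUSTER_ERROR_LEADER_VERSION_INCONSISTENT are renumbered (9996 -> 9995, 9997 -> 9996) to make room for the new SF_CLUSTER_ERROR_BINLOG_MISSED (9997), so both ends of a cluster connection presumably need matching versions. The force-close range macros are used as before; a sketch of a typical check (close_connection is a placeholder cleanup call, not part of this header):

    if (result >= SF_FORCE_CLOSE_CONNECTION_ERROR_MIN &&
            result <= SF_FORCE_CLOSE_CONNECTION_ERROR_MAX)
    {
        close_connection(conn);   /* placeholder */
    }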

View File

@ -34,58 +34,138 @@
#include "sf_func.h" #include "sf_func.h"
#include "sf_file_writer.h" #include "sf_file_writer.h"
#define BINLOG_INDEX_FILENAME SF_BINLOG_FILE_PREFIX"_index.dat" #define BINLOG_INDEX_ITEM_START_INDEX_STR "start_index"
#define BINLOG_INDEX_ITEM_START_INDEX_LEN \
(sizeof(BINLOG_INDEX_ITEM_START_INDEX_STR) - 1)
#define BINLOG_INDEX_ITEM_CURRENT_WRITE "current_write" #define BINLOG_INDEX_ITEM_CURRENT_WRITE_STR "current_write"
#define BINLOG_INDEX_ITEM_CURRENT_COMPRESS "current_compress" #define BINLOG_INDEX_ITEM_CURRENT_WRITE_LEN \
(sizeof(BINLOG_INDEX_ITEM_CURRENT_WRITE_STR) - 1)
#define GET_BINLOG_FILENAME(writer) \ #define BINLOG_INDEX_ITEM_CURRENT_COMPRESS_STR "current_compress"
sprintf(writer->file.name, "%s/%s/%s"SF_BINLOG_FILE_EXT_FMT, \ #define BINLOG_INDEX_ITEM_CURRENT_COMPRESS_LEN \
writer->cfg.data_path, writer->cfg.subdir_name, \ (sizeof(BINLOG_INDEX_ITEM_CURRENT_COMPRESS_STR) - 1)
SF_BINLOG_FILE_PREFIX, writer->binlog.index)
static int write_to_binlog_index_file(SFFileWriterInfo *writer) static inline void sf_file_writer_get_binlog_filename(SFFileWriterInfo *writer)
{ {
char full_filename[PATH_MAX]; sf_file_writer_get_filename_ex(
writer->cfg.data_path, writer->cfg.subdir_name,
writer->cfg.file_prefix, writer->binlog.last_index,
writer->file.name.str, writer->file.name.size);
}
static inline void sf_file_writer_get_index_filename_ex(
const char *data_path, const char *subdir_name,
const char *file_prefix, const int file_prefix_len,
char *filename, const int size)
{
#define INDEX_FILENAME_AFFIX_STR "_index.dat"
#define INDEX_FILENAME_AFFIX_LEN (sizeof(INDEX_FILENAME_AFFIX_STR) - 1)
char *p;
int data_path_len;
int subdir_name_len;
data_path_len = strlen(data_path);
subdir_name_len = strlen(subdir_name);
if (data_path_len + 1 + subdir_name_len + 1 + file_prefix_len +
INDEX_FILENAME_AFFIX_LEN >= size)
{
*filename = '\0';
return;
}
memcpy(filename, data_path, data_path_len);
p = filename + data_path_len;
*p++ = '/';
memcpy(p, subdir_name, subdir_name_len);
p += subdir_name_len;
*p++ = '/';
memcpy(p, file_prefix, file_prefix_len);
p += file_prefix_len;
memcpy(p, INDEX_FILENAME_AFFIX_STR, INDEX_FILENAME_AFFIX_LEN);
p += INDEX_FILENAME_AFFIX_LEN;
*p = '\0';
}
const char *sf_file_writer_get_index_filename(const char *data_path,
const char *subdir_name, char *filename, const int size)
{
sf_file_writer_get_index_filename_ex(data_path, subdir_name,
SF_BINLOG_FILE_PREFIX_STR, SF_BINLOG_FILE_PREFIX_LEN,
filename, size);
return filename;
}
int sf_file_writer_write_to_binlog_index_file_ex(const char *data_path,
const char *subdir_name, const char *file_prefix,
const int start_index, const int last_index,
const int compress_index)
{
char filename[PATH_MAX];
char buff[256]; char buff[256];
char *p;
int result; int result;
int len; int len;
snprintf(full_filename, sizeof(full_filename), "%s/%s/%s", sf_file_writer_get_index_filename_ex(data_path, subdir_name, file_prefix,
writer->cfg.data_path, writer->cfg.subdir_name, strlen(file_prefix), filename, sizeof(filename));
BINLOG_INDEX_FILENAME); p = buff;
memcpy(p, BINLOG_INDEX_ITEM_START_INDEX_STR,
BINLOG_INDEX_ITEM_START_INDEX_LEN);
p += BINLOG_INDEX_ITEM_START_INDEX_LEN;
*p++ = '=';
p += fc_itoa(start_index, p);
*p++ = '\n';
len = sprintf(buff, "%s=%d\n" memcpy(p, BINLOG_INDEX_ITEM_CURRENT_WRITE_STR,
"%s=%d\n", BINLOG_INDEX_ITEM_CURRENT_WRITE_LEN);
BINLOG_INDEX_ITEM_CURRENT_WRITE, p += BINLOG_INDEX_ITEM_CURRENT_WRITE_LEN;
writer->binlog.index, *p++ = '=';
BINLOG_INDEX_ITEM_CURRENT_COMPRESS, p += fc_itoa(last_index, p);
writer->binlog.compress_index); *p++ = '\n';
if ((result=safeWriteToFile(full_filename, buff, len)) != 0) {
memcpy(p, BINLOG_INDEX_ITEM_CURRENT_COMPRESS_STR,
BINLOG_INDEX_ITEM_CURRENT_COMPRESS_LEN);
p += BINLOG_INDEX_ITEM_CURRENT_COMPRESS_LEN;
*p++ = '=';
p += fc_itoa(compress_index, p);
*p++ = '\n';
len = p - buff;
if ((result=safeWriteToFile(filename, buff, len)) != 0) {
logError("file: "__FILE__", line: %d, " logError("file: "__FILE__", line: %d, "
"write to file \"%s\" fail, " "write to file \"%s\" fail, errno: %d, error info: %s",
"errno: %d, error info: %s", __LINE__, filename, result, STRERROR(result));
__LINE__, full_filename,
result, STRERROR(result));
} }
return result; return result;
} }
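Given the key/value lines built above, a generated index file now looks like the following (values are illustrative):

    start_index=0
    current_write=12
    current_compress=3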
static int get_binlog_index_from_file(SFFileWriterInfo *writer) static inline int write_to_binlog_index_file(SFFileWriterInfo *writer)
{
return sf_file_writer_write_to_binlog_index_file_ex(
writer->cfg.data_path, writer->cfg.subdir_name,
writer->cfg.file_prefix, writer->binlog.start_index,
writer->binlog.last_index, writer->binlog.compress_index);
}
static int get_binlog_info_from_file(const char *data_path,
const char *subdir_name, int *start_index,
int *last_index, int *compress_index)
{ {
char full_filename[PATH_MAX]; char full_filename[PATH_MAX];
IniContext ini_context; IniContext ini_context;
int result; int result;
snprintf(full_filename, sizeof(full_filename), "%s/%s/%s", sf_file_writer_get_index_filename_ex(data_path, subdir_name,
writer->cfg.data_path, writer->cfg.subdir_name, SF_BINLOG_FILE_PREFIX_STR, SF_BINLOG_FILE_PREFIX_LEN,
BINLOG_INDEX_FILENAME); full_filename, sizeof(full_filename));
if (access(full_filename, F_OK) != 0) { if (access(full_filename, F_OK) != 0) {
if (errno == ENOENT) { return errno != 0 ? errno : EPERM;
writer->binlog.index = 0;
return write_to_binlog_index_file(writer);
}
} }
if ((result=iniLoadFromFile(full_filename, &ini_context)) != 0) { if ((result=iniLoadFromFile(full_filename, &ini_context)) != 0) {
@ -95,29 +175,71 @@ static int get_binlog_index_from_file(SFFileWriterInfo *writer)
return result; return result;
} }
writer->binlog.index = iniGetIntValue(NULL, *start_index = iniGetIntValue(NULL,
BINLOG_INDEX_ITEM_CURRENT_WRITE, &ini_context, 0); BINLOG_INDEX_ITEM_START_INDEX_STR,
writer->binlog.compress_index = iniGetIntValue(NULL, &ini_context, 0);
BINLOG_INDEX_ITEM_CURRENT_COMPRESS, &ini_context, 0); *last_index = iniGetIntValue(NULL,
BINLOG_INDEX_ITEM_CURRENT_WRITE_STR,
&ini_context, 0);
*compress_index = iniGetIntValue(NULL,
BINLOG_INDEX_ITEM_CURRENT_COMPRESS_STR,
&ini_context, 0);
iniFreeContext(&ini_context); iniFreeContext(&ini_context);
return 0; return 0;
} }
int sf_file_writer_get_binlog_indexes(const char *data_path,
const char *subdir_name, int *start_index, int *last_index)
{
int result;
int compress_index;
result = get_binlog_info_from_file(data_path, subdir_name,
start_index, last_index, &compress_index);
if (result == ENOENT) {
*start_index = *last_index = 0;
return 0;
} else {
return result;
}
}
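A short usage sketch of this helper (the subdir name is illustrative); it reads the indexes straight from the on-disk index file, without needing an initialized writer:

    int start_index;
    int last_index;

    if ((result=sf_file_writer_get_binlog_indexes(data_path, "binlog",
            &start_index, &last_index)) == 0)
    {
        /* binlog files from start_index through last_index are available */
    }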
static inline int get_binlog_index_from_file(SFFileWriterInfo *writer)
{
int result;
result = get_binlog_info_from_file(writer->cfg.data_path,
writer->cfg.subdir_name, &writer->binlog.start_index,
&writer->binlog.last_index, &writer->binlog.compress_index);
if (result == ENOENT) {
writer->binlog.start_index = 0;
writer->binlog.last_index = 0;
writer->binlog.compress_index = 0;
if (writer->cfg.file_rotate_size > 0) {
return write_to_binlog_index_file(writer);
} else {
return 0;
}
} else {
return result;
}
}
static int open_writable_binlog(SFFileWriterInfo *writer) static int open_writable_binlog(SFFileWriterInfo *writer)
{ {
if (writer->file.fd >= 0) { if (writer->file.fd >= 0) {
close(writer->file.fd); close(writer->file.fd);
} }
GET_BINLOG_FILENAME(writer); sf_file_writer_get_binlog_filename(writer);
writer->file.fd = open(writer->file.name, writer->file.fd = open(writer->file.name.str, O_WRONLY |
O_WRONLY | O_CREAT | O_APPEND, 0644); O_CREAT | O_APPEND | O_CLOEXEC, 0644);
if (writer->file.fd < 0) { if (writer->file.fd < 0) {
logError("file: "__FILE__", line: %d, " logError("file: "__FILE__", line: %d, "
"open file \"%s\" fail, " "open file \"%s\" fail, "
"errno: %d, error info: %s", "errno: %d, error info: %s",
__LINE__, writer->file.name, __LINE__, writer->file.name.str,
errno, STRERROR(errno)); errno, STRERROR(errno));
return errno != 0 ? errno : EACCES; return errno != 0 ? errno : EACCES;
} }
@ -127,7 +249,7 @@ static int open_writable_binlog(SFFileWriterInfo *writer)
logError("file: "__FILE__", line: %d, " logError("file: "__FILE__", line: %d, "
"lseek file \"%s\" fail, " "lseek file \"%s\" fail, "
"errno: %d, error info: %s", "errno: %d, error info: %s",
__LINE__, writer->file.name, __LINE__, writer->file.name.str,
errno, STRERROR(errno)); errno, STRERROR(errno));
return errno != 0 ? errno : EIO; return errno != 0 ? errno : EIO;
} }
@ -137,23 +259,24 @@ static int open_writable_binlog(SFFileWriterInfo *writer)
static int open_next_binlog(SFFileWriterInfo *writer) static int open_next_binlog(SFFileWriterInfo *writer)
{ {
GET_BINLOG_FILENAME(writer); sf_file_writer_get_binlog_filename(writer);
if (access(writer->file.name, F_OK) == 0) { if (access(writer->file.name.str, F_OK) == 0) {
char bak_filename[PATH_MAX]; char bak_filename[PATH_MAX];
char date_str[32]; char date_str[32];
snprintf(bak_filename, sizeof(bak_filename), "%s.%s", formatDatetime(g_current_time, "%Y%m%d%H%M%S",
writer->file.name, formatDatetime(g_current_time, date_str, sizeof(date_str));
"%Y%m%d%H%M%S", date_str, sizeof(date_str))); fc_combine_two_strings(writer->file.name.str,
if (rename(writer->file.name, bak_filename) == 0) { date_str, '.', bak_filename);
if (rename(writer->file.name.str, bak_filename) == 0) {
logWarning("file: "__FILE__", line: %d, " logWarning("file: "__FILE__", line: %d, "
"binlog file %s exist, rename to %s", "binlog file %s exist, rename to %s",
__LINE__, writer->file.name, bak_filename); __LINE__, writer->file.name.str, bak_filename);
} else { } else {
logError("file: "__FILE__", line: %d, " logError("file: "__FILE__", line: %d, "
"rename binlog %s to backup %s fail, " "rename binlog %s to backup %s fail, "
"errno: %d, error info: %s", "errno: %d, error info: %s",
__LINE__, writer->file.name, bak_filename, __LINE__, writer->file.name.str, bak_filename,
errno, STRERROR(errno)); errno, STRERROR(errno));
return errno != 0 ? errno : EPERM; return errno != 0 ? errno : EPERM;
} }
@ -172,35 +295,43 @@ static int do_write_to_file(SFFileWriterInfo *writer,
logError("file: "__FILE__", line: %d, " logError("file: "__FILE__", line: %d, "
"write to binlog file \"%s\" fail, " "write to binlog file \"%s\" fail, "
"errno: %d, error info: %s", "errno: %d, error info: %s",
__LINE__, writer->file.name, __LINE__, writer->file.name.str,
result, STRERROR(result)); result, STRERROR(result));
return result; return result;
} }
if (writer->cfg.call_fsync) {
if (fsync(writer->file.fd) != 0) { if (fsync(writer->file.fd) != 0) {
result = errno != 0 ? errno : EIO; result = errno != 0 ? errno : EIO;
logError("file: "__FILE__", line: %d, " logError("file: "__FILE__", line: %d, "
"fsync to binlog file \"%s\" fail, " "fsync to binlog file \"%s\" fail, errno: %d, "
"errno: %d, error info: %s", "error info: %s", __LINE__, writer->file.name.str,
__LINE__, writer->file.name,
result, STRERROR(result)); result, STRERROR(result));
return result; return result;
} }
}
writer->file.size += len; writer->file.size += len;
if (writer->write_done_callback.func != NULL) {
writer->write_done_callback.func(writer,
writer->write_done_callback.args);
}
return 0; return 0;
} }
static int check_write_to_file(SFFileWriterInfo *writer, int sf_file_writer_direct_write(SFFileWriterInfo *writer,
char *buff, const int len) char *buff, const int len)
{ {
int result; int result;
if (writer->file.size + len <= SF_BINLOG_FILE_MAX_SIZE) { if ((writer->cfg.file_rotate_size <= 0) || (writer->file.size
+ len <= writer->cfg.file_rotate_size))
{
return do_write_to_file(writer, buff, len); return do_write_to_file(writer, buff, len);
} }
writer->binlog.index++; //binlog rotate writer->binlog.last_index++; //binlog rotate
if ((result=write_to_binlog_index_file(writer)) == 0) { if ((result=write_to_binlog_index_file(writer)) == 0) {
result = open_next_binlog(writer); result = open_next_binlog(writer);
} }
@ -208,7 +339,7 @@ static int check_write_to_file(SFFileWriterInfo *writer,
if (result != 0) { if (result != 0) {
logError("file: "__FILE__", line: %d, " logError("file: "__FILE__", line: %d, "
"open binlog file \"%s\" fail", "open binlog file \"%s\" fail",
__LINE__, writer->file.name); __LINE__, writer->file.name.str);
return result; return result;
} }
@ -220,58 +351,96 @@ int sf_file_writer_flush(SFFileWriterInfo *writer)
int result; int result;
int len; int len;
len = SF_BINLOG_BUFFER_LENGTH(writer->binlog_buffer); len = SF_BINLOG_BUFFER_PRODUCER_DATA_LENGTH(writer->binlog_buffer);
if (len == 0) { if (len == 0) {
return 0; return 0;
} }
result = check_write_to_file(writer, writer->binlog_buffer.buff, len); if ((result=sf_file_writer_direct_write(writer, writer->
writer->binlog_buffer.end = writer->binlog_buffer.buff; binlog_buffer.buff, len)) == 0)
{
if (writer->flags & SF_FILE_WRITER_FLAGS_WANT_DONE_VERSION) {
writer->last_versions.done = writer->last_versions.pending;
}
}
writer->binlog_buffer.data_end = writer->binlog_buffer.buff;
return result; return result;
} }
int sf_file_writer_get_current_index(SFFileWriterInfo *writer) int sf_file_writer_fsync(SFFileWriterInfo *writer)
{ {
int result;
if ((result=sf_file_writer_flush(writer)) != 0) {
return result;
}
if (fsync(writer->file.fd) == 0) {
return 0;
} else {
result = errno != 0 ? errno : EIO;
logError("file: "__FILE__", line: %d, "
"fsync to binlog file \"%s\" fail, errno: %d, "
"error info: %s", __LINE__, writer->file.name.str,
result, STRERROR(result));
return result;
}
}
int sf_file_writer_get_indexes(SFFileWriterInfo *writer,
int *start_index, int *last_index)
{
int result;
if (writer == NULL) { //for data recovery if (writer == NULL) { //for data recovery
*start_index = *last_index = 0;
return 0; return 0;
} }
if (writer->binlog.index < 0) { if (writer->binlog.last_index < 0) {
get_binlog_index_from_file(writer); if ((result=get_binlog_index_from_file(writer)) != 0) {
*start_index = *last_index = -1;
return result;
}
} }
return writer->binlog.index; *start_index = writer->binlog.start_index;
*last_index = writer->binlog.last_index;
return 0;
} }
int sf_file_writer_deal_buffer(SFFileWriterInfo *writer, int sf_file_writer_deal_versioned_buffer(SFFileWriterInfo *writer,
BufferInfo *buffer, const int64_t version) BufferInfo *buffer, const int64_t version)
{ {
int result; int result;
if (buffer->length >= writer->binlog_buffer.size / 4) { if (buffer->length >= writer->binlog_buffer.size / 4) {
if (SF_BINLOG_BUFFER_LENGTH(writer->binlog_buffer) > 0) { if (SF_BINLOG_BUFFER_PRODUCER_DATA_LENGTH(writer->binlog_buffer) > 0) {
if ((result=sf_file_writer_flush(writer)) != 0) { if ((result=sf_file_writer_flush(writer)) != 0) {
return result; return result;
} }
} }
if ((result=check_write_to_file(writer, buffer->buff, if ((result=sf_file_writer_direct_write(writer, buffer->
buffer->length)) == 0) buff, buffer->length)) == 0)
{ {
if (writer->flags & SF_FILE_WRITER_FLAGS_WANT_DONE_VERSION) { if (writer->flags & SF_FILE_WRITER_FLAGS_WANT_DONE_VERSION) {
writer->last_versions.pending = version; writer->last_versions.pending = version;
writer->last_versions.done = version;
} }
} }
return result; return result;
} }
if (writer->file.size + SF_BINLOG_BUFFER_LENGTH(writer-> if (writer->cfg.file_rotate_size > 0 && writer->file.size +
binlog_buffer) + buffer->length > SF_BINLOG_FILE_MAX_SIZE) SF_BINLOG_BUFFER_PRODUCER_DATA_LENGTH(writer->binlog_buffer) +
buffer->length > writer->cfg.file_rotate_size)
{ {
if ((result=sf_file_writer_flush(writer)) != 0) { if ((result=sf_file_writer_flush(writer)) != 0) {
return result; return result;
} }
} else if (writer->binlog_buffer.size - SF_BINLOG_BUFFER_LENGTH( } else if (SF_BINLOG_BUFFER_PRODUCER_BUFF_REMAIN(
writer->binlog_buffer) < buffer->length) writer->binlog_buffer) < buffer->length)
{ {
if ((result=sf_file_writer_flush(writer)) != 0) { if ((result=sf_file_writer_flush(writer)) != 0) {
@ -282,15 +451,40 @@ int sf_file_writer_deal_buffer(SFFileWriterInfo *writer,
if (writer->flags & SF_FILE_WRITER_FLAGS_WANT_DONE_VERSION) { if (writer->flags & SF_FILE_WRITER_FLAGS_WANT_DONE_VERSION) {
writer->last_versions.pending = version; writer->last_versions.pending = version;
} }
memcpy(writer->binlog_buffer.end, buffer->buff, buffer->length); memcpy(writer->binlog_buffer.data_end, buffer->buff, buffer->length);
writer->binlog_buffer.end += buffer->length; writer->binlog_buffer.data_end += buffer->length;
return 0; return 0;
} }
int sf_file_writer_init_normal(SFFileWriterInfo *writer, int sf_file_writer_save_buffer_ex(SFFileWriterInfo *writer,
const char *data_path, const char *subdir_name, const int length, const bool flush)
const int buffer_size) {
int result;
if (writer->cfg.file_rotate_size > 0 && writer->file.size +
SF_BINLOG_BUFFER_PRODUCER_DATA_LENGTH(writer->binlog_buffer) +
length > writer->cfg.file_rotate_size)
{
if ((result=sf_file_writer_flush(writer)) != 0) {
return result;
}
}
writer->binlog_buffer.data_end += length;
if (flush || SF_BINLOG_BUFFER_PRODUCER_BUFF_REMAIN(writer->
binlog_buffer) < writer->cfg.max_record_size)
{
return sf_file_writer_flush(writer);
} else {
return 0;
}
}
int sf_file_writer_init(SFFileWriterInfo *writer, const char *data_path,
const char *subdir_name, const char *file_prefix,
const int max_record_size, const int buffer_size,
const int64_t file_rotate_size, const bool call_fsync)
{ {
int result; int result;
int path_len; int path_len;
@ -301,15 +495,18 @@ int sf_file_writer_init_normal(SFFileWriterInfo *writer,
writer->last_versions.pending = 0; writer->last_versions.pending = 0;
writer->last_versions.done = 0; writer->last_versions.done = 0;
writer->flags = 0; writer->flags = 0;
sf_file_writer_set_write_done_callback(writer, NULL, NULL);
if ((result=sf_binlog_buffer_init(&writer-> if ((result=sf_binlog_buffer_init(&writer->
binlog_buffer, buffer_size)) != 0) binlog_buffer, buffer_size)) != 0)
{ {
return result; return result;
} }
writer->cfg.max_record_size = max_record_size;
writer->cfg.call_fsync = call_fsync;
writer->cfg.file_rotate_size = file_rotate_size;
writer->cfg.data_path = data_path; writer->cfg.data_path = data_path;
path_len = snprintf(filepath, sizeof(filepath), path_len = fc_combine_full_filepath(data_path, subdir_name, filepath);
"%s/%s", data_path, subdir_name);
if ((result=fc_check_mkdir_ex(filepath, 0775, &create)) != 0) { if ((result=fc_check_mkdir_ex(filepath, 0775, &create)) != 0) {
return result; return result;
} }
@ -318,11 +515,11 @@ int sf_file_writer_init_normal(SFFileWriterInfo *writer,
} }
writer->file.fd = -1; writer->file.fd = -1;
snprintf(writer->cfg.subdir_name, fc_safe_strcpy(writer->cfg.subdir_name, subdir_name);
sizeof(writer->cfg.subdir_name), fc_safe_strcpy(writer->cfg.file_prefix, file_prefix);
"%s", subdir_name); writer->file.name.size = path_len + 32;
writer->file.name = (char *)fc_malloc(path_len + 32); writer->file.name.str = (char *)fc_malloc(writer->file.name.size);
if (writer->file.name == NULL) { if (writer->file.name.str == NULL) {
return ENOMEM; return ENOMEM;
} }
@ -337,13 +534,59 @@ int sf_file_writer_init_normal(SFFileWriterInfo *writer,
return 0; return 0;
} }
int sf_file_writer_set_binlog_index(SFFileWriterInfo *writer, void sf_file_writer_destroy(SFFileWriterInfo *writer)
const int binlog_index) {
if (writer->file.fd >= 0) {
close(writer->file.fd);
writer->file.fd = -1;
}
if (writer->file.name.str != NULL) {
free(writer->file.name.str);
writer->file.name.str = NULL;
}
sf_binlog_buffer_destroy(&writer->binlog_buffer);
}
int sf_file_writer_set_indexes(SFFileWriterInfo *writer,
const int start_index, const int last_index)
{ {
int result; int result;
if (writer->binlog.index != binlog_index) { if (writer->binlog.start_index != start_index ||
writer->binlog.index = binlog_index; writer->binlog.last_index != last_index)
{
writer->binlog.start_index = start_index;
writer->binlog.last_index = last_index;
if ((result=write_to_binlog_index_file(writer)) != 0) {
return result;
}
}
return 0;
}
int sf_file_writer_set_binlog_start_index(SFFileWriterInfo *writer,
const int start_index)
{
int result;
if (writer->binlog.start_index != start_index) {
writer->binlog.start_index = start_index;
if ((result=write_to_binlog_index_file(writer)) != 0) {
return result;
}
}
return 0;
}
int sf_file_writer_set_binlog_write_index(SFFileWriterInfo *writer,
const int last_index)
{
int result;
if (writer->binlog.last_index != last_index) {
writer->binlog.last_index = last_index;
if ((result=write_to_binlog_index_file(writer)) != 0) { if ((result=write_to_binlog_index_file(writer)) != 0) {
return result; return result;
} }
@ -357,40 +600,108 @@ int sf_file_writer_get_last_lines(const char *data_path,
char *buff, const int buff_size, int *count, int *length) char *buff, const int buff_size, int *count, int *length)
{ {
int result; int result;
int remain_count; int target_count;
int current_count; int count1;
int current_index;
int i;
char filename[PATH_MAX]; char filename[PATH_MAX];
string_t lines; string_t lines;
current_index = current_write_index; target_count = *count;
*length = 0;
remain_count = *count;
for (i=0; i<2; i++) {
current_count = remain_count;
sf_file_writer_get_filename(data_path, subdir_name, sf_file_writer_get_filename(data_path, subdir_name,
current_index, filename, sizeof(filename)); current_write_index, filename, sizeof(filename));
result = fc_get_last_lines(filename, buff + *length, if (access(filename, F_OK) == 0) {
buff_size - *length, &lines, &current_count); if ((result=fc_get_last_lines(filename, buff, buff_size,
if (result == 0) { &lines, count)) != 0)
memmove(buff + *length, lines.str, lines.len); {
*length += lines.len; if (result != ENOENT) {
remain_count -= current_count;
if (remain_count == 0) {
break;
}
} else if (result != ENOENT) {
*count = 0;
return result; return result;
} }
if (current_index == 0) {
break;
} }
--current_index; //try previous binlog file if (*count >= target_count || current_write_index == 0) {
memmove(buff, lines.str, lines.len);
*length = lines.len;
return 0;
}
} else {
result = errno != 0 ? errno : EPERM;
if (result == ENOENT) {
*count = 0;
*length = 0;
return 0;
} else {
logError("file: "__FILE__", line: %d, "
"stat file %s fail, errno: %d, error info: %s",
__LINE__, filename, result, STRERROR(result));
*count = 0;
*length = 0;
return result;
}
}
sf_file_writer_get_filename(data_path, subdir_name,
current_write_index - 1, filename, sizeof(filename));
if (access(filename, F_OK) != 0) {
result = errno != 0 ? errno : EPERM;
if (result == ENOENT) {
memmove(buff, lines.str, lines.len);
*length = lines.len;
return 0;
} else {
logError("file: "__FILE__", line: %d, "
"stat file %s fail, errno: %d, error info: %s",
__LINE__, filename, result, STRERROR(result));
*count = 0;
*length = 0;
return result;
}
}
count1 = target_count - *count;
if ((result=fc_get_last_lines(filename, buff,
buff_size, &lines, &count1)) != 0)
{
*count = 0;
*length = 0;
return result;
}
memmove(buff, lines.str, lines.len);
*length = lines.len;
if (*count == 0) {
*count = count1;
} else {
sf_file_writer_get_filename(data_path, subdir_name,
current_write_index, filename, sizeof(filename));
if ((result=fc_get_first_lines(filename, buff + (*length),
buff_size - (*length), &lines, count)) != 0)
{
*count = 0;
*length = 0;
return result;
}
*count += count1;
*length += lines.len;
} }
*count -= remain_count;
return 0; return 0;
} }
int sf_file_writer_get_last_line(const char *data_path,
const char *subdir_name, char *buff,
const int buff_size, int *length)
{
int result;
int last_index;
int count = 1;
if ((result=sf_file_writer_get_binlog_last_index(data_path,
subdir_name, &last_index)) != 0)
{
*length = 0;
return result;
}
return sf_file_writer_get_last_lines(data_path, subdir_name,
last_index, buff, buff_size, &count, length);
}
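A sketch of fetching the newest record through this wrapper (the subdir name and buffer size are arbitrary here):

    char buff[4 * 1024];
    int length;

    if ((result=sf_file_writer_get_last_line(data_path, "binlog",
            buff, sizeof(buff), &length)) == 0)
    {
        /* buff[0 .. length) holds the last line of the newest binlog file */
    }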

View File

@ -24,29 +24,42 @@
#define SF_FILE_WRITER_FLAGS_WANT_DONE_VERSION 1 #define SF_FILE_WRITER_FLAGS_WANT_DONE_VERSION 1
#define SF_BINLOG_SUBDIR_NAME_SIZE 128 #define SF_BINLOG_SUBDIR_NAME_SIZE 128
#define SF_BINLOG_FILE_MAX_SIZE (1024 * 1024 * 1024) //for binlog rotating by size #define SF_BINLOG_FILE_PREFIX_STR_SIZE 64
#define SF_BINLOG_FILE_PREFIX "binlog" #define SF_BINLOG_DEFAULT_ROTATE_SIZE (1024 * 1024 * 1024)
#define SF_BINLOG_FILE_EXT_FMT ".%06d" #define SF_BINLOG_NEVER_ROTATE_FILE 0
#define SF_BINLOG_FILE_PREFIX_STR "binlog"
#define SF_BINLOG_FILE_PREFIX_LEN (sizeof(SF_BINLOG_FILE_PREFIX_STR) - 1)
#define SF_BINLOG_FILE_EXT_LEN 6
#define SF_BINLOG_FILE_EXT_FMT ".%0"FC_MACRO_TOSTRING(SF_BINLOG_FILE_EXT_LEN)"d"
#define SF_BINLOG_BUFFER_LENGTH(buffer) ((buffer).end - (buffer).buff) struct sf_file_writer_info;
#define SF_BINLOG_BUFFER_REMAIN(buffer) ((buffer).end - (buffer).current)
typedef void (*sf_file_write_done_callback)(
struct sf_file_writer_info *writer, void *args);
typedef struct sf_file_writer_info { typedef struct sf_file_writer_info {
struct { struct {
const char *data_path; const char *data_path;
char subdir_name[SF_BINLOG_SUBDIR_NAME_SIZE]; char subdir_name[SF_BINLOG_SUBDIR_NAME_SIZE];
char file_prefix[SF_BINLOG_FILE_PREFIX_STR_SIZE];
int64_t file_rotate_size;
int max_record_size; int max_record_size;
bool call_fsync;
} cfg; } cfg;
struct { struct {
int index; int start_index; //for read only
int last_index; //for write
int compress_index; int compress_index;
} binlog; } binlog;
struct { struct {
int fd; int fd;
int64_t size; int64_t size; //file size
char *name; struct {
char *str;
int size;
} name;
} file; } file;
int64_t total_count; int64_t total_count;
@ -57,73 +70,252 @@ typedef struct sf_file_writer_info {
int64_t pending; int64_t pending;
volatile int64_t done; volatile int64_t done;
} last_versions; } last_versions;
struct {
sf_file_write_done_callback func;
void *args;
} write_done_callback;
} SFFileWriterInfo; } SFFileWriterInfo;
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
int sf_file_writer_init_normal(SFFileWriterInfo *writer, int sf_file_writer_init(SFFileWriterInfo *writer, const char *data_path,
const char *data_path, const char *subdir_name, const char *subdir_name, const char *file_prefix,
const int buffer_size); const int max_record_size, const int buffer_size,
const int64_t file_rotate_size, const bool call_fsync);
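A sketch of the new init call; the subdir name and sizes are placeholders, while the prefix and rotate-size macros come from this header:

    if ((result=sf_file_writer_init(&writer, data_path, "binlog",
            SF_BINLOG_FILE_PREFIX_STR, 1024 /* max_record_size */,
            64 * 1024 /* buffer_size */, SF_BINLOG_DEFAULT_ROTATE_SIZE,
            true /* call_fsync */)) != 0)
    {
        return result;
    }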
int sf_file_writer_deal_buffer(SFFileWriterInfo *writer, void sf_file_writer_destroy(SFFileWriterInfo *writer);
int sf_file_writer_direct_write(SFFileWriterInfo *writer,
char *buff, const int len);
int sf_file_writer_deal_versioned_buffer(SFFileWriterInfo *writer,
BufferInfo *buffer, const int64_t version); BufferInfo *buffer, const int64_t version);
#define sf_file_writer_deal_buffer(writer, buffer) \
sf_file_writer_deal_versioned_buffer(writer, buffer, 0)
int sf_file_writer_flush(SFFileWriterInfo *writer); int sf_file_writer_flush(SFFileWriterInfo *writer);
int sf_file_writer_fsync(SFFileWriterInfo *writer);
#define SF_FILE_WRITER_DATA_END_BUFF(writer) (writer)->binlog_buffer.data_end
#define SF_FILE_WRITER_CURRENT_DATA_VERSION(writer) \
(writer)->last_versions.pending
#define SF_FILE_WRITER_NEXT_DATA_VERSION(writer) \
++((writer)->last_versions.pending)
int sf_file_writer_save_buffer_ex(SFFileWriterInfo *writer,
const int length, const bool flush);
static inline int sf_file_writer_save_buffer(
SFFileWriterInfo *writer, const int length)
{
const bool flush = false;
return sf_file_writer_save_buffer_ex(writer, length, flush);
}
static inline int sf_file_writer_flush_buffer(
SFFileWriterInfo *writer, const int length)
{
const bool flush = true;
return sf_file_writer_save_buffer_ex(writer, length, flush);
}
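The save-buffer pair appears intended for producers that format records directly into the writer's buffer; a rough sketch (the record format and record_id are invented here):

    char *p;
    int len;

    /* write at the producer end of the buffer, then account for the bytes;
     * the writer flushes on its own when the remaining space drops below
     * cfg.max_record_size, or immediately via sf_file_writer_flush_buffer */
    p = SF_FILE_WRITER_DATA_END_BUFF(writer);
    len = sprintf(p, "hypothetical-record %d\n", record_id);
    result = sf_file_writer_save_buffer(writer, len);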
static inline void sf_file_writer_set_flags( static inline void sf_file_writer_set_flags(
SFFileWriterInfo *writer, const short flags) SFFileWriterInfo *writer, const short flags)
{ {
writer->flags = flags; writer->flags = flags;
} }
static inline int64_t sf_file_writer_get_last_version( static inline void sf_file_writer_set_call_fsync(
SFFileWriterInfo *writer) SFFileWriterInfo *writer, const bool call_fsync)
{
writer->cfg.call_fsync = call_fsync;
}
static inline void sf_file_writer_set_write_done_callback (
SFFileWriterInfo *writer, sf_file_write_done_callback callback,
void *args)
{
writer->write_done_callback.func = callback;
writer->write_done_callback.args = args;
}
static inline int64_t sf_file_writer_get_last_version_ex(
SFFileWriterInfo *writer, const int log_level)
{ {
if (writer->flags & SF_FILE_WRITER_FLAGS_WANT_DONE_VERSION) { if (writer->flags & SF_FILE_WRITER_FLAGS_WANT_DONE_VERSION) {
return writer->last_versions.done; return writer->last_versions.done;
} else { } else {
logError("file: "__FILE__", line: %d, " if (FC_LOG_BY_LEVEL(log_level)) {
"should set writer flags to %d!", __LINE__, log_it_ex(&g_log_context, log_level, "file: %s, line: %d, "
"writer: %s, should set writer flags to %d!",
__FILE__, __LINE__, writer->cfg.subdir_name,
SF_FILE_WRITER_FLAGS_WANT_DONE_VERSION); SF_FILE_WRITER_FLAGS_WANT_DONE_VERSION);
}
return -1; return -1;
} }
} }
int sf_file_writer_get_current_index(SFFileWriterInfo *writer); #define sf_file_writer_get_last_version(writer) \
sf_file_writer_get_last_version_ex(writer, LOG_ERR)
#define sf_file_writer_get_last_version_silence(writer) \
sf_file_writer_get_last_version_ex(writer, LOG_NOTHING)
int sf_file_writer_get_binlog_indexes(const char *data_path,
const char *subdir_name, int *start_index, int *last_index);
static inline int sf_file_writer_get_binlog_start_index(
const char *data_path, const char *subdir_name,
int *start_index)
{
int last_index;
return sf_file_writer_get_binlog_indexes(data_path,
subdir_name, start_index, &last_index);
}
static inline int sf_file_writer_get_binlog_last_index(
const char *data_path, const char *subdir_name,
int *last_index)
{
int start_index;
return sf_file_writer_get_binlog_indexes(data_path,
subdir_name, &start_index, last_index);
}
int sf_file_writer_set_indexes(SFFileWriterInfo *writer,
const int start_index, const int last_index);
int sf_file_writer_get_indexes(SFFileWriterInfo *writer,
int *start_index, int *last_index);
static inline int sf_file_writer_get_start_index(SFFileWriterInfo *writer)
{
int start_index;
int last_index;
sf_file_writer_get_indexes(writer, &start_index, &last_index);
return start_index;
}
static inline int sf_file_writer_get_last_index(SFFileWriterInfo *writer)
{
int start_index;
int last_index;
sf_file_writer_get_indexes(writer, &start_index, &last_index);
return last_index;
}
#define sf_file_writer_get_current_write_index(writer) \
sf_file_writer_get_last_index(writer)
static inline void sf_file_writer_get_current_position( static inline void sf_file_writer_get_current_position(
SFFileWriterInfo *writer, SFBinlogFilePosition *position) SFFileWriterInfo *writer, SFBinlogFilePosition *position)
{ {
position->index = writer->binlog.index; position->index = writer->binlog.last_index;
position->offset = writer->file.size; position->offset = writer->file.size;
} }
static inline const char *sf_file_writer_get_filepath( static inline const char *sf_file_writer_get_filepath(
const char *data_path, const char *subdir_name, const char *data_path, const char *subdir_name,
char *filepath, const int size)
{
fc_get_full_filepath_ex(data_path, strlen(data_path),
subdir_name, strlen(subdir_name), filepath, size);
return filepath;
}
static inline const char *sf_file_writer_get_filename_ex(
const char *data_path, const char *subdir_name,
const char *file_prefix, const int binlog_index,
char *filename, const int size) char *filename, const int size)
{ {
snprintf(filename, size, "%s/%s", data_path, subdir_name); char *p;
int data_path_len;
int subdir_name_len;
int file_prefix_len;
data_path_len = strlen(data_path);
subdir_name_len = strlen(subdir_name);
file_prefix_len = strlen(file_prefix);
if (data_path_len + subdir_name_len + file_prefix_len +
4 + SF_BINLOG_FILE_EXT_LEN >= size)
{
snprintf(filename, size, "%s/%s/%s"SF_BINLOG_FILE_EXT_FMT,
data_path, subdir_name, file_prefix, binlog_index);
return filename;
}
p = filename;
memcpy(p, data_path, data_path_len);
p += data_path_len;
*p++ = '/';
memcpy(p, subdir_name, subdir_name_len);
p += subdir_name_len;
*p++ = '/';
memcpy(p, file_prefix, file_prefix_len);
p += file_prefix_len;
*p++ = '.';
fc_ltostr_ex(binlog_index, p, SF_BINLOG_FILE_EXT_LEN);
return filename; return filename;
} }
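For reference, with the default prefix the helpers above produce names such as (the base path and subdir are illustrative):

    /data/demo/binlog/binlog.000000      <- sf_file_writer_get_filename(...)
    /data/demo/binlog/binlog_index.dat   <- sf_file_writer_get_index_filename(...)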
static inline const char *sf_file_writer_get_filename( #define sf_file_writer_get_filename(data_path, subdir_name, \
const char *data_path, const char *subdir_name, binlog_index, filename, size) \
const int binlog_index, char *filename, const int size) sf_file_writer_get_filename_ex(data_path, subdir_name, \
SF_BINLOG_FILE_PREFIX_STR, binlog_index, filename, size)
const char *sf_file_writer_get_index_filename(const char *data_path,
const char *subdir_name, char *filename, const int size);
int sf_file_writer_set_binlog_start_index(SFFileWriterInfo *writer,
const int start_index);
int sf_file_writer_set_binlog_write_index(SFFileWriterInfo *writer,
const int last_index);
static inline int sf_file_writer_rotate_file(SFFileWriterInfo *writer)
{ {
snprintf(filename, size, "%s/%s/%s"SF_BINLOG_FILE_EXT_FMT, data_path, int last_index;
subdir_name, SF_BINLOG_FILE_PREFIX, binlog_index); last_index = sf_file_writer_get_current_write_index(writer);
return filename; return sf_file_writer_set_binlog_write_index(writer, last_index + 1);
} }
int sf_file_writer_set_binlog_index(SFFileWriterInfo *writer,
const int binlog_index);
int sf_file_writer_get_last_lines(const char *data_path, int sf_file_writer_get_last_lines(const char *data_path,
const char *subdir_name, const int current_write_index, const char *subdir_name, const int current_write_index,
char *buff, const int buff_size, int *count, int *length); char *buff, const int buff_size, int *count, int *length);
static inline int sf_file_writer_get_last_line_ex(const char *data_path,
const char *subdir_name, const int current_write_index,
char *buff, const int buff_size, int *length)
{
int count = 1;
return sf_file_writer_get_last_lines(data_path, subdir_name,
current_write_index, buff, buff_size, &count, length);
}
int sf_file_writer_get_last_line(const char *data_path,
const char *subdir_name, char *buff,
const int buff_size, int *length);
int sf_file_writer_write_to_binlog_index_file_ex(const char *data_path,
const char *subdir_name, const char *file_prefix,
const int start_index, const int last_index,
const int compress_index);
#define sf_file_writer_write_to_binlog_index_file(data_path, \
subdir_name, start_index, last_index) \
sf_file_writer_write_to_binlog_index_file_ex(data_path, subdir_name, \
SF_BINLOG_FILE_PREFIX_STR, start_index, last_index, 0)
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

View File

@ -38,16 +38,17 @@ int sf_connect_to_server(const char *ip_addr, const int port, int *sock)
if(*sock < 0) { if(*sock < 0) {
return errno != 0 ? errno : ENOMEM; return errno != 0 ? errno : ENOMEM;
} }
tcpsetserveropt(*sock, g_sf_global_vars.network_timeout); tcpsetserveropt(*sock, g_sf_global_vars.net_buffer_cfg.network_timeout);
if ((result=tcpsetnonblockopt(*sock)) != 0) { if ((result=tcpsetnonblockopt(*sock)) != 0) {
close(*sock); close(*sock);
*sock = -1; *sock = -1;
return result; return result;
} }
FC_SET_CLOEXEC(*sock);
if ((result=connectserverbyip_nb(*sock, ip_addr, port, if ((result=connectserverbyip_nb(*sock, ip_addr, port, g_sf_global_vars.
g_sf_global_vars.connect_timeout)) != 0) net_buffer_cfg.connect_timeout)) != 0)
{ {
close(*sock); close(*sock);
*sock = -1; *sock = -1;

View File

@ -37,7 +37,7 @@ static inline void sf_terminate_myself_ex(const char *file,
{ {
g_sf_global_vars.continue_flag = false; g_sf_global_vars.continue_flag = false;
if (kill(getpid(), SIGQUIT) == 0) { //signal myself to quit if (kill(getpid(), SIGQUIT) == 0) { //signal myself to quit
logInfo("file: "__FILE__", line: %d, " logWarning("file: "__FILE__", line: %d, "
"kill myself from caller {file: %s, line: %d, func: %s}", "kill myself from caller {file: %s, line: %d, func: %s}",
__LINE__, file, line, func); __LINE__, file, line, func);
} else { } else {
@ -56,7 +56,8 @@ static inline int sf_binlog_buffer_init(SFBinlogBuffer *buffer, const int size)
return ENOMEM; return ENOMEM;
} }
buffer->current = buffer->end = buffer->buff; buffer->current = buffer->data_end = buffer->buff;
buffer->buff_end = buffer->buff + size;
buffer->size = size; buffer->size = size;
return 0; return 0;
} }
@ -65,7 +66,8 @@ static inline void sf_binlog_buffer_destroy(SFBinlogBuffer *buffer)
{ {
if (buffer->buff != NULL) { if (buffer->buff != NULL) {
free(buffer->buff); free(buffer->buff);
buffer->current = buffer->end = buffer->buff = NULL; buffer->current = buffer->buff = NULL;
buffer->data_end = buffer->buff_end = NULL;
buffer->size = 0; buffer->size = 0;
} }
} }
@ -76,6 +78,11 @@ static inline int sf_synchronize_ctx_init(SFSynchronizeContext *sctx)
return init_pthread_lock_cond_pair(&sctx->lcp); return init_pthread_lock_cond_pair(&sctx->lcp);
} }
static inline void sf_synchronize_ctx_destroy(SFSynchronizeContext *sctx)
{
destroy_pthread_lock_cond_pair(&sctx->lcp);
}
static inline void sf_synchronize_counter_add( static inline void sf_synchronize_counter_add(
SFSynchronizeContext *sctx, const int count) SFSynchronizeContext *sctx, const int count)
{ {
@ -106,12 +113,40 @@ static inline void sf_synchronize_counter_notify(
static inline void sf_synchronize_counter_wait(SFSynchronizeContext *sctx) static inline void sf_synchronize_counter_wait(SFSynchronizeContext *sctx)
{ {
PTHREAD_MUTEX_LOCK(&sctx->lcp.lock); PTHREAD_MUTEX_LOCK(&sctx->lcp.lock);
while (sctx->waiting_count != 0) { while (sctx->waiting_count != 0 && SF_G_CONTINUE_FLAG) {
pthread_cond_wait(&sctx->lcp.cond, &sctx->lcp.lock); pthread_cond_wait(&sctx->lcp.cond, &sctx->lcp.lock);
} }
PTHREAD_MUTEX_UNLOCK(&sctx->lcp.lock); PTHREAD_MUTEX_UNLOCK(&sctx->lcp.lock);
} }
#define sf_synchronize_finished_notify_no_lock(sctx, err_no) \
(sctx)->finished = true; \
(sctx)->result = err_no; \
pthread_cond_signal(&(sctx)->lcp.cond)
static inline void sf_synchronize_finished_notify(
SFSynchronizeContext *sctx, const int result)
{
PTHREAD_MUTEX_LOCK(&sctx->lcp.lock);
sf_synchronize_finished_notify_no_lock(sctx, result);
PTHREAD_MUTEX_UNLOCK(&sctx->lcp.lock);
}
static inline int sf_synchronize_finished_wait(SFSynchronizeContext *sctx)
{
int result;
PTHREAD_MUTEX_LOCK(&sctx->lcp.lock);
while (!sctx->finished && SF_G_CONTINUE_FLAG) {
pthread_cond_wait(&sctx->lcp.cond, &sctx->lcp.lock);
}
result = sctx->result;
sctx->finished = false; //for next notify
PTHREAD_MUTEX_UNLOCK(&sctx->lcp.lock);
return result;
}
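A sketch of the new single-shot notify/wait pair (the thread roles are illustrative):

    /* in the worker thread: report the final status once the job is done */
    sf_synchronize_finished_notify(&sctx, result);

    /* in the waiting thread: block until notified or the service stops,
     * then take the reported status; the flag is reset for the next round */
    result = sf_synchronize_finished_wait(&sctx);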
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

File diff suppressed because it is too large.

View File

@ -30,32 +30,36 @@ typedef struct sf_connection_stat {
} SFConnectionStat; } SFConnectionStat;
typedef struct sf_global_variables { typedef struct sf_global_variables {
int connect_timeout;
int network_timeout;
struct { struct {
char str[MAX_PATH_SIZE]; char str[MAX_PATH_SIZE];
int len;
bool inited; bool inited;
bool created;
} base_path; } base_path;
volatile bool continue_flag; volatile bool continue_flag;
bool tcp_quick_ack; bool tcp_quick_ack;
int max_connections; bool epoll_edge_trigger;
int max_pkg_size;
int min_buff_size; SFNetBufferConfig net_buffer_cfg;
int max_buff_size;
int task_buffer_extra_size; int task_buffer_extra_size;
int thread_stack_size; int thread_stack_size;
time_t up_time; time_t up_time;
gid_t run_by_gid; struct {
uid_t run_by_uid; bool inited;
char run_by_group[32]; gid_t gid;
char run_by_user[32]; uid_t uid;
char group[32];
char user[32];
} run_by;
SFLogConfig error_log; SFLogConfig error_log;
SFConnectionStat connection_stat; SFConnectionStat connection_stat;
sf_error_handler_callback error_handler; sf_error_handler_callback error_handler;
string_t empty; string_t empty;
volatile time_t last_binlog_writer_log_timestamp;
} SFGlobalVariables; } SFGlobalVariables;
typedef struct sf_context_ini_config { typedef struct sf_context_ini_config {
@ -63,6 +67,9 @@ typedef struct sf_context_ini_config {
int default_inner_port; int default_inner_port;
int default_outer_port; int default_outer_port;
int default_work_threads; int default_work_threads;
int max_pkg_size_min_value;
FCCommunicationType comm_type;
const char *max_pkg_size_item_name;
} SFContextIniConfig; } SFContextIniConfig;
#ifdef __cplusplus #ifdef __cplusplus
@ -73,13 +80,36 @@ extern SFGlobalVariables g_sf_global_vars;
extern SFContext g_sf_context; extern SFContext g_sf_context;
#define SF_G_BASE_PATH_STR g_sf_global_vars.base_path.str #define SF_G_BASE_PATH_STR g_sf_global_vars.base_path.str
#define SF_G_BASE_PATH_LEN g_sf_global_vars.base_path.len
#define SF_G_BASE_PATH_INITED g_sf_global_vars.base_path.inited #define SF_G_BASE_PATH_INITED g_sf_global_vars.base_path.inited
#define SF_G_BASE_PATH_CREATED g_sf_global_vars.base_path.created
#define SF_G_CONTINUE_FLAG g_sf_global_vars.continue_flag #define SF_G_CONTINUE_FLAG g_sf_global_vars.continue_flag
#define SF_G_CONNECT_TIMEOUT g_sf_global_vars.connect_timeout #define SF_G_CONNECT_TIMEOUT g_sf_global_vars.net_buffer_cfg.connect_timeout
#define SF_G_NETWORK_TIMEOUT g_sf_global_vars.network_timeout #define SF_G_NETWORK_TIMEOUT g_sf_global_vars.net_buffer_cfg.network_timeout
#define SF_G_MAX_CONNECTIONS g_sf_global_vars.max_connections #define SF_G_MAX_CONNECTIONS g_sf_global_vars.net_buffer_cfg.max_connections
#define SF_G_THREAD_STACK_SIZE g_sf_global_vars.thread_stack_size #define SF_G_THREAD_STACK_SIZE g_sf_global_vars.thread_stack_size
#define SF_G_UP_TIME g_sf_global_vars.up_time
#define SF_G_SOCK_HANDLER (g_sf_context.handlers \
[SF_IPV4_ADDRESS_FAMILY_INDEX].handlers + \
SF_SOCKET_NETWORK_HANDLER_INDEX)
#define SF_G_OUTER_PORT SF_G_SOCK_HANDLER->outer.port
#define SF_G_INNER_PORT SF_G_SOCK_HANDLER->inner.port
#define SF_G_OUTER_BIND_ADDR4 g_sf_context.handlers \
[SF_IPV4_ADDRESS_FAMILY_INDEX].outer_bind_addr
#define SF_G_INNER_BIND_ADDR4 g_sf_context.handlers \
[SF_IPV4_ADDRESS_FAMILY_INDEX].inner_bind_addr
#define SF_G_OUTER_BIND_ADDR6 g_sf_context.handlers \
[SF_IPV6_ADDRESS_FAMILY_INDEX].outer_bind_addr
#define SF_G_INNER_BIND_ADDR6 g_sf_context.handlers \
[SF_IPV6_ADDRESS_FAMILY_INDEX].inner_bind_addr
#define SF_G_IPV4_ENABLED (g_sf_context.handlers \
[SF_IPV4_ADDRESS_FAMILY_INDEX].af == AF_INET)
#define SF_G_IPV6_ENABLED (g_sf_context.handlers \
[SF_IPV6_ADDRESS_FAMILY_INDEX].af == AF_INET6)
#define SF_G_ACCEPT_THREADS g_sf_context.accept_threads
#define SF_G_WORK_THREADS g_sf_context.work_threads #define SF_G_WORK_THREADS g_sf_context.work_threads
#define SF_G_ALIVE_THREAD_COUNT g_sf_context.thread_count #define SF_G_ALIVE_THREAD_COUNT g_sf_context.thread_count
#define SF_G_THREAD_INDEX(tdata) (int)(tdata - g_sf_context.thread_data) #define SF_G_THREAD_INDEX(tdata) (int)(tdata - g_sf_context.thread_data)
@ -88,18 +118,28 @@ extern SFContext g_sf_context;
#define SF_G_ERROR_HANDLER g_sf_global_vars.error_handler #define SF_G_ERROR_HANDLER g_sf_global_vars.error_handler
#define SF_G_EMPTY_STRING g_sf_global_vars.empty #define SF_G_EMPTY_STRING g_sf_global_vars.empty
#define LAST_BINLOG_WRITER_LOG_TIMESTAMP g_sf_global_vars. \
last_binlog_writer_log_timestamp
#define SF_WORK_THREADS(sf_context) sf_context.work_threads #define SF_G_EPOLL_EDGE_TRIGGER g_sf_global_vars.epoll_edge_trigger
#define SF_ALIVE_THREAD_COUNT(sf_context) sf_context.thread_count
#define SF_THREAD_INDEX(sf_context, tdata) (int)(tdata - sf_context.thread_data) #define SF_WORK_THREADS(sf_context) (sf_context).work_threads
#define SF_ALIVE_THREAD_COUNT(sf_context) (sf_context).thread_count
#define SF_THREAD_INDEX(sf_context, tdata) (int)(tdata - (sf_context).thread_data)
#define SF_IPV4_ENABLED(sf_context) ((sf_context).handlers \
[SF_IPV4_ADDRESS_FAMILY_INDEX].af == AF_INET)
#define SF_IPV6_ENABLED(sf_context) ((sf_context).handlers \
[SF_IPV6_ADDRESS_FAMILY_INDEX].af == AF_INET6)
#define SF_CHOWN_RETURN_ON_ERROR(path, current_uid, current_gid) \ #define SF_CHOWN_RETURN_ON_ERROR(path, current_uid, current_gid) \
do { \ do { \
if (!(g_sf_global_vars.run_by_gid == current_gid && \ if (g_sf_global_vars.run_by.inited && !(g_sf_global_vars. \
g_sf_global_vars.run_by_uid == current_uid)) \ run_by.gid == current_gid && g_sf_global_vars. \
run_by.uid == current_uid)) \
{ \ { \
if (chown(path, g_sf_global_vars.run_by_uid, \ if (chown(path, g_sf_global_vars.run_by.uid, \
g_sf_global_vars.run_by_gid) != 0) \ g_sf_global_vars.run_by.gid) != 0) \
{ \ { \
logError("file: "__FILE__", line: %d, " \ logError("file: "__FILE__", line: %d, " \
"chown \"%s\" fail, " \ "chown \"%s\" fail, " \
@ -113,62 +153,115 @@ extern SFContext g_sf_context;
#define SF_CHOWN_TO_RUNBY_RETURN_ON_ERROR(path) \ #define SF_CHOWN_TO_RUNBY_RETURN_ON_ERROR(path) \
SF_CHOWN_RETURN_ON_ERROR(path, geteuid(), getegid()) SF_CHOWN_RETURN_ON_ERROR(path, geteuid(), getegid())
#define SF_SET_CONTEXT_INI_CONFIG(config, filename, pIniContext, \
section_name, def_inner_port, def_outer_port, def_work_threads) \ #define SF_FCHOWN_RETURN_ON_ERROR(fd, path, current_uid, current_gid) \
do { \
if (g_sf_global_vars.run_by.inited && !(g_sf_global_vars. \
run_by.gid == current_gid && g_sf_global_vars. \
run_by.uid == current_uid)) \
{ \
if (fchown(fd, g_sf_global_vars.run_by.uid, \
g_sf_global_vars.run_by.gid) != 0) \
{ \
logError("file: "__FILE__", line: %d, " \
"fchown \"%s\" fail, " \
"errno: %d, error info: %s", \
__LINE__, path, errno, STRERROR(errno)); \
return errno != 0 ? errno : EPERM; \
} \
} \
} while (0)
#define SF_FCHOWN_TO_RUNBY_RETURN_ON_ERROR(fd, path) \
SF_FCHOWN_RETURN_ON_ERROR(fd, path, geteuid(), getegid())
#define SF_SET_CONTEXT_INI_CONFIG_EX(config, the_comm_type, filename, \
pIniContext, section_name, def_inner_port, def_outer_port, \
def_work_threads, max_pkg_size_item_nm, max_pkg_size_min_val) \
do { \ do { \
FAST_INI_SET_FULL_CTX_EX(config.ini_ctx, filename, \ FAST_INI_SET_FULL_CTX_EX(config.ini_ctx, filename, \
section_name, pIniContext); \ section_name, pIniContext); \
config.comm_type = the_comm_type; \
config.default_inner_port = def_inner_port; \ config.default_inner_port = def_inner_port; \
config.default_outer_port = def_outer_port; \ config.default_outer_port = def_outer_port; \
config.default_work_threads = def_work_threads; \ config.default_work_threads = def_work_threads; \
config.max_pkg_size_item_name = max_pkg_size_item_nm; \
config.max_pkg_size_min_value = max_pkg_size_min_val; \
} while (0) } while (0)
int sf_load_global_config_ex(const char *server_name, #define SF_SET_CONTEXT_INI_CONFIG(config, the_comm_type, \
IniFullContext *ini_ctx, const bool load_network_params, filename, pIniContext, section_name, def_inner_port, \
const int task_buffer_extra_size); def_outer_port, def_work_threads) \
SF_SET_CONTEXT_INI_CONFIG_EX(config, the_comm_type, filename, \
pIniContext, section_name, def_inner_port, def_outer_port, \
def_work_threads, "max_pkg_size", 0)
static inline int sf_load_global_config(const char *server_name, int sf_load_global_config_ex(const char *log_filename_prefix,
IniFullContext *ini_ctx, const bool load_network_params,
const char *max_pkg_size_item_nm, const int fixed_buff_size,
const int task_buffer_extra_size, const bool need_set_run_by);
static inline int sf_load_global_config(const char *log_filename_prefix,
IniFullContext *ini_ctx) IniFullContext *ini_ctx)
{ {
const bool load_network_params = true; const bool load_network_params = true;
const char *max_pkg_size_item_nm = "max_pkg_size";
const int fixed_buff_size = 0;
const int task_buffer_extra_size = 0; const int task_buffer_extra_size = 0;
const bool need_set_run_by = true;
return sf_load_global_config_ex(server_name, ini_ctx, return sf_load_global_config_ex(log_filename_prefix, ini_ctx,
load_network_params, task_buffer_extra_size); load_network_params, max_pkg_size_item_nm, fixed_buff_size,
task_buffer_extra_size, need_set_run_by);
} }
int sf_load_config_ex(const char *server_name, int sf_load_config_ex(const char *log_filename_prefix,
SFContextIniConfig *config, const int task_buffer_extra_size); SFContextIniConfig *config, const int fixed_buff_size,
const int task_buffer_extra_size, const bool need_set_run_by);
static inline int sf_load_config(const char *server_name, static inline int sf_load_config(const char *log_filename_prefix,
const FCCommunicationType comm_type,
const char *filename, IniContext *pIniContext, const char *filename, IniContext *pIniContext,
const char *section_name, const int default_inner_port, const char *section_name, const int default_inner_port,
const int default_outer_port, const int task_buffer_extra_size) const int default_outer_port, const int fixed_buff_size,
const int task_buffer_extra_size)
{ {
const bool need_set_run_by = true;
SFContextIniConfig config; SFContextIniConfig config;
SF_SET_CONTEXT_INI_CONFIG(config, filename, pIniContext, SF_SET_CONTEXT_INI_CONFIG(config, comm_type, filename, pIniContext,
section_name, default_inner_port, default_outer_port, section_name, default_inner_port, default_outer_port,
DEFAULT_WORK_THREADS); DEFAULT_WORK_THREADS);
return sf_load_config_ex(server_name, &config, task_buffer_extra_size); return sf_load_config_ex(log_filename_prefix, &config, fixed_buff_size,
task_buffer_extra_size, need_set_run_by);
} }
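A sketch of the extended config-loading entry point; comm_type, the section name, ports and buffer sizes below are placeholders supplied by the caller:

    result = sf_load_config("my_server", comm_type, config_filename,
            &ini_context, "service", default_inner_port, default_outer_port,
            0 /* fixed_buff_size */, 0 /* task_buffer_extra_size */);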
int sf_load_context_from_config_ex(SFContext *sf_context, int sf_load_context_from_config_ex(SFContext *sf_context,
SFContextIniConfig *config); SFContextIniConfig *config, const int fixed_buff_size,
const int task_buffer_extra_size);
static inline int sf_load_context_from_config(SFContext *sf_context, static inline int sf_load_context_from_config(SFContext *sf_context,
const FCCommunicationType comm_type,
const char *filename, IniContext *pIniContext, const char *filename, IniContext *pIniContext,
const char *section_name, const int default_inner_port, const char *section_name, const int default_inner_port,
const int default_outer_port) const int default_outer_port, const int fixed_buff_size,
const int task_buffer_extra_size)
{ {
SFContextIniConfig config; SFContextIniConfig config;
SF_SET_CONTEXT_INI_CONFIG(config, filename, pIniContext, SF_SET_CONTEXT_INI_CONFIG(config, comm_type, filename, pIniContext,
section_name, default_inner_port, default_outer_port, section_name, default_inner_port, default_outer_port,
DEFAULT_WORK_THREADS); DEFAULT_WORK_THREADS);
return sf_load_context_from_config_ex(sf_context, &config); return sf_load_context_from_config_ex(sf_context, &config,
fixed_buff_size, task_buffer_extra_size);
} }
int sf_alloc_rdma_pd(SFContext *sf_context,
FCAddressPtrArray *address_array);
void sf_set_address_family_by_ip(SFContext *sf_context,
FCAddressPtrArray *address_array);
int sf_load_log_config(IniFullContext *ini_ctx, LogContext *log_ctx, int sf_load_log_config(IniFullContext *ini_ctx, LogContext *log_ctx,
SFLogConfig *log_cfg); SFLogConfig *log_cfg);
@ -193,7 +286,14 @@ void sf_log_config_to_string_ex(SFLogConfig *log_cfg, const char *caption,
void sf_slow_log_config_to_string(SFSlowLogConfig *slow_log_cfg, void sf_slow_log_config_to_string(SFSlowLogConfig *slow_log_cfg,
const char *caption, char *output, const int size); const char *caption, char *output, const int size);
void sf_global_config_to_string(char *output, const int size); void sf_global_config_to_string_ex(const char *max_pkg_size_item_nm,
char *output, const int size);
static inline void sf_global_config_to_string(char *output, const int size)
{
const char *max_pkg_size_item_nm = "max_pkg_size";
sf_global_config_to_string_ex(max_pkg_size_item_nm, output, size);
}
void sf_context_config_to_string(const SFContext *sf_context, void sf_context_config_to_string(const SFContext *sf_context,
char *output, const int size); char *output, const int size);
@ -205,10 +305,22 @@ void sf_log_config_ex(const char *other_config);
#define sf_log_config_to_string(log_cfg, caption, output, size) \ #define sf_log_config_to_string(log_cfg, caption, output, size) \
sf_log_config_to_string_ex(log_cfg, caption, NULL, output, size) sf_log_config_to_string_ex(log_cfg, caption, NULL, output, size)
int sf_get_base_path_from_conf_file(const char *config_filename);
int sf_load_global_base_path(IniFullContext *ini_ctx);
int sf_load_data_path_config_ex(IniFullContext *ini_ctx,
const char *item_name, const char *default_value, string_t *path);
#define sf_load_data_path_config(ini_ctx, path) \
sf_load_data_path_config_ex(ini_ctx, "data_path", "data", path)
static inline void sf_set_global_base_path(const char *base_path) static inline void sf_set_global_base_path(const char *base_path)
{ {
snprintf(SF_G_BASE_PATH_STR, sizeof(SF_G_BASE_PATH_STR), string_t path_string;
"%s", base_path);
FC_SET_STRING(path_string, (char *)base_path);
SF_G_BASE_PATH_LEN = normalize_path(NULL, &path_string,
SF_G_BASE_PATH_STR, sizeof(SF_G_BASE_PATH_STR));
SF_G_BASE_PATH_INITED = true; SF_G_BASE_PATH_INITED = true;
} }

src/sf_iov.c (new file, 197 lines)
View File

@ -0,0 +1,197 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "fastcommon/logger.h"
#include "fastcommon/sockopt.h"
#include "sf_define.h"
#include "sf_iov.h"
int sf_iova_consume(SFDynamicIOVArray *iova, const int consume_len)
{
struct iovec *iob;
struct iovec *end;
int sum_bytes;
int remain_len;
int result;
if (iova->cnt <= 0) {
logError("file: "__FILE__", line: %d, "
"invalid iov count: %d", __LINE__, iova->cnt);
return EINVAL;
}
if ((result=sf_iova_check_alloc(iova)) != 0) {
return result;
}
end = iova->iov + iova->cnt;
iob = iova->iov;
sum_bytes = iob->iov_len;
for (iob=iob + 1; sum_bytes <= consume_len && iob < end; iob++) {
sum_bytes += iob->iov_len;
}
if (sum_bytes < consume_len) {
logError("file: "__FILE__", line: %d, "
"iov length: %d < consume length: %d",
__LINE__, sum_bytes, consume_len);
return EOVERFLOW;
}
iova->cnt -= (iob - iova->iov);
iova->iov = iob;
if (iova->cnt == 0) {
struct iovec *last;
/* update the last iov for next slice */
last = iob - 1;
last->iov_base = (char *)last->iov_base + last->iov_len;
last->iov_len = 0;
} else {
/* adjust the first element */
remain_len = sum_bytes - consume_len;
if (remain_len < iob->iov_len) {
iob->iov_base = (char *)iob->iov_base +
(iob->iov_len - remain_len);
iob->iov_len = remain_len;
}
}
return 0;
}
static inline int iova_slice(SFDynamicIOVArray *iova, const int slice_len)
{
struct iovec *iob;
struct iovec *end;
int sum_bytes;
int exceed_len;
sum_bytes = 0;
end = iova->ptr + iova->input.cnt;
for (iob=iova->iov; iob<end; iob++) {
sum_bytes += iob->iov_len;
if (sum_bytes > slice_len) {
exceed_len = sum_bytes - slice_len;
iob->iov_len -= exceed_len;
break;
} else if (sum_bytes == slice_len) {
break;
}
}
if (iob < end) {
iova->cnt = (iob - iova->iov) + 1;
return 0;
} else {
logError("file: "__FILE__", line: %d, "
"iov remain bytes: %d < slice length: %d",
__LINE__, sum_bytes, slice_len);
iova->cnt = 0;
return EOVERFLOW;
}
}
int sf_iova_first_slice(SFDynamicIOVArray *iova, const int slice_len)
{
int result;
if ((result=sf_iova_check_alloc(iova)) != 0) {
return result;
}
return iova_slice(iova, slice_len);
}
int sf_iova_next_slice(SFDynamicIOVArray *iova,
const int consume_len, const int slice_len)
{
struct iovec *last;
const struct iovec *origin;
int remain_len;
int result;
if ((result=sf_iova_consume(iova, consume_len)) != 0) {
return result;
}
last = iova->iov + iova->cnt - 1;
origin = iova->input.iov + (last - iova->ptr);
remain_len = ((char *)origin->iov_base + origin->iov_len) -
(char *)last->iov_base;
if (last->iov_len != remain_len) {
last->iov_len = remain_len;
if (iova->cnt == 0) {
iova->iov = last;
}
}
return iova_slice(iova, slice_len);
}
int sf_iova_memset_ex(const struct iovec *iov, const int iovcnt,
int c, const int offset, const int length)
{
const struct iovec *iob;
const struct iovec *end;
int sum_bytes;
int remain_len;
int left_bytes;
char *start;
if (length == 0) {
return 0;
}
sum_bytes = 0;
end = iov + iovcnt;
for (iob=iov; iob<end; iob++) {
sum_bytes += iob->iov_len;
if (sum_bytes > offset) {
break;
}
}
if (iob == end) {
logError("file: "__FILE__", line: %d, "
"iov length: %d < (offset: %d + length: %d)",
__LINE__, sum_bytes, offset, length);
return EOVERFLOW;
}
remain_len = sum_bytes - offset;
start = (char *)iob->iov_base + (iob->iov_len - remain_len);
if (length <= remain_len) {
memset(start, c, length);
return 0;
}
memset(start, c, remain_len);
left_bytes = length - remain_len;
while (++iob < end) {
if (left_bytes <= iob->iov_len) {
memset(iob->iov_base, c, left_bytes);
return 0;
}
memset(iob->iov_base, c, iob->iov_len);
left_bytes -= iob->iov_len;
}
logError("file: "__FILE__", line: %d, "
"iov length is too short, overflow bytes: %d",
__LINE__, left_bytes);
return EOVERFLOW;
}
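/*
 * Example (not part of the diff): a minimal sketch showing the intended
 * behavior of sf_iova_memset_ex() when the byte range crosses an iovec
 * boundary. The buffers and sizes below are made up for illustration and
 * this assumes sf_iov.h (and therefore <sys/uio.h>) is included.
 */
static void memset_across_boundary_example(void)
{
    char b1[8];
    char b2[8];
    struct iovec iov[2];

    iov[0].iov_base = b1; iov[0].iov_len = sizeof(b1);
    iov[1].iov_base = b2; iov[1].iov_len = sizeof(b2);

    /* zero bytes [6, 12) of the combined 16-byte region: the last two
     * bytes of b1 and the first four bytes of b2 */
    if (sf_iova_memset_ex(iov, 2, 0, 6, 6) != 0) {
        /* EOVERFLOW would mean offset + length exceeds the total iov length */
    }
}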

src/sf_iov.h (new file)

@@ -0,0 +1,116 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef _SF_IOV_H
#define _SF_IOV_H
#include "fastcommon/shared_func.h"
#include "sf_types.h"
#define SF_IOV_FIXED_SIZE 256
typedef struct sf_dynamic_iov_array {
struct iovec holder[SF_IOV_FIXED_SIZE];
struct iovec *ptr;
struct {
const struct iovec *iov;
int cnt;
} input;
struct iovec *iov;
int cnt;
} SFDynamicIOVArray;
#define sf_iova_init(iova, _iov, _cnt) \
(iova).input.iov = _iov; \
(iova).iov = (struct iovec *)_iov; \
(iova).cnt = (iova).input.cnt = _cnt
#define sf_iova_destroy(iova) \
if ((iova).iov != (struct iovec *)(iova).input.iov && \
(iova).ptr != (iova).holder) \
free((iova).ptr)
#ifdef __cplusplus
extern "C" {
#endif
static inline int sf_iova_check_alloc(SFDynamicIOVArray *iova)
{
if (iova->iov == (struct iovec *)iova->input.iov) {
if (iova->input.cnt <= SF_IOV_FIXED_SIZE) {
iova->ptr = iova->holder;
} else {
iova->ptr = fc_malloc(iova->input.cnt *
sizeof(struct iovec));
if (iova->ptr == NULL) {
return ENOMEM;
}
}
memcpy(iova->ptr, iova->input.iov, iova->input.cnt *
sizeof(struct iovec));
iova->iov = iova->ptr;
}
return 0;
}
int sf_iova_consume(SFDynamicIOVArray *iova, const int consume_len);
int sf_iova_first_slice(SFDynamicIOVArray *iova, const int slice_len);
int sf_iova_next_slice(SFDynamicIOVArray *iova,
const int consume_len, const int slice_len);
int sf_iova_memset_ex(const struct iovec *iov, const int iovcnt,
int c, const int offset, const int length);
#define sf_iova_memset(iova, c, offset, length) \
sf_iova_memset_ex((iova).iov, (iova).cnt, c, offset, length)
static inline void sf_iova_memcpy_ex(const struct iovec *iov,
const int iovcnt, const char *buff, const int length)
{
const struct iovec *iob;
const struct iovec *end;
const char *current;
int remain;
int bytes;
current = buff;
remain = length;
end = iov + iovcnt;
for (iob=iov; iob<end; iob++) {
bytes = FC_MIN(remain, iob->iov_len);
memcpy(iob->iov_base, current, bytes);
remain -= bytes;
if (remain == 0) {
break;
}
current += bytes;
}
}
#define sf_iova_memcpy(iova, buff, length) \
sf_iova_memcpy_ex((iova).iov, (iova).cnt, buff, length)
#ifdef __cplusplus
}
#endif
#endif
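/*
 * Example (not part of the diff): a sketch of the slicing workflow declared
 * above, pushing "total" bytes from an iovec array in slices of at most
 * "slice_len" bytes. send_iov_fully() is a hypothetical transport call that
 * writes one whole slice; it is not part of this header.
 */
#include "sf_iov.h"

static int send_iov_fully(int sock, const struct iovec *iov, const int cnt);  /* hypothetical */

static int send_in_slices(int sock, const struct iovec *iov, const int cnt,
        const int total, const int slice_len)
{
    SFDynamicIOVArray iova;
    int prev;
    int next;
    int remain;
    int result;

    sf_iova_init(iova, iov, cnt);
    remain = total;
    prev = FC_MIN(slice_len, remain);
    if ((result=sf_iova_first_slice(&iova, prev)) != 0) {
        sf_iova_destroy(iova);
        return result;
    }
    while (1) {
        /* iova.iov / iova.cnt now describe exactly "prev" bytes */
        if ((result=send_iov_fully(sock, iova.iov, iova.cnt)) != 0) {
            break;
        }
        remain -= prev;
        if (remain == 0) {
            break;
        }
        next = FC_MIN(slice_len, remain);
        /* skip the bytes just sent, then carve the next slice */
        if ((result=sf_iova_next_slice(&iova, prev, next)) != 0) {
            break;
        }
        prev = next;
    }
    sf_iova_destroy(iova);
    return result;
}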

File diff suppressed because it is too large

@@ -22,8 +22,13 @@
 #include <stdlib.h>
 #include <string.h>
 #include "fastcommon/fast_task_queue.h"
+#include "fastcommon/ioevent_loop.h"
 #include "sf_define.h"
 #include "sf_types.h"
+#include "sf_global.h"
+#define SF_CTX (task->handler->fh->ctx)
+#define SF_NET_BUFFER_CFG SF_CTX->net_buffer_cfg
 #ifdef __cplusplus
 extern "C" {
@@ -31,50 +36,63 @@ extern "C" {
 void sf_set_parameters_ex(SFContext *sf_context, const int header_size,
         sf_set_body_length_callback set_body_length_func,
-        sf_deal_task_func deal_func, TaskCleanUpCallback cleanup_func,
+        sf_alloc_recv_buffer_callback alloc_recv_buffer_func,
+        sf_send_done_callback send_done_callback,
+        sf_deal_task_callback deal_func, TaskCleanUpCallback cleanup_func,
         sf_recv_timeout_callback timeout_callback, sf_release_buffer_callback
         release_buffer_callback);
 #define sf_set_parameters(header_size, set_body_length_func, \
-        deal_func, cleanup_func, timeout_callback) \
+        alloc_recv_buffer_func, deal_func, cleanup_func, timeout_callback) \
     sf_set_parameters_ex(&g_sf_context, header_size, \
-            set_body_length_func, deal_func, \
-            cleanup_func, timeout_callback, NULL)
+            set_body_length_func, alloc_recv_buffer_func, \
+            deal_func, cleanup_func, timeout_callback, NULL)
-static inline void sf_set_deal_task_func_ex(SFContext *sf_context,
-        sf_deal_task_func deal_func)
+static inline void sf_set_deal_task_callback_ex(SFContext *sf_context,
+        sf_deal_task_callback deal_func)
 {
-    sf_context->deal_task = deal_func;
+    sf_context->callbacks.deal_task = deal_func;
 }
-#define sf_set_deal_task_func(deal_func) \
-    sf_set_deal_task_func_ex(&g_sf_context, deal_func)
+#define sf_set_deal_task_callback(deal_func) \
+    sf_set_deal_task_callback_ex(&g_sf_context, deal_func)
-static inline void sf_set_remove_from_ready_list_ex(SFContext *sf_context,
-        const bool enabled)
+static inline void sf_set_connect_done_callback_ex(SFContext *sf_context,
+        sf_connect_done_callback done_callback)
 {
-    sf_context->remove_from_ready_list = enabled;
+    sf_context->callbacks.connect_done = done_callback;
 }
-#define sf_set_remove_from_ready_list(enabled) \
-    sf_set_remove_from_ready_list_ex(&g_sf_context, enabled);
+#define sf_set_connect_done_callback(done_callback) \
+    sf_set_connect_done_callback_ex(&g_sf_context, done_callback)
-static inline TaskCleanUpCallback sf_get_task_cleanup_func_ex(
+static inline TaskCleanUpCallback sf_get_task_cleanup_callback_ex(
         SFContext *sf_context)
 {
-    return sf_context->task_cleanup_func;
+    return sf_context->callbacks.task_cleanup;
 }
-#define sf_get_task_cleanup_func() \
-    sf_get_task_cleanup_func_ex(&g_sf_context)
+#define sf_get_task_cleanup_callback() \
+    sf_get_task_cleanup_callback_ex(&g_sf_context)
-#define sf_nio_task_is_idle(task) \
-    (task->offset == 0 && task->length == 0)
+#define sf_nio_task_send_done(task) \
+    (task->send.ptr->offset == 0 && task->send.ptr->length == 0)
-void sf_recv_notify_read(int sock, short event, void *arg);
+static inline void sf_nio_reset_task_length(struct fast_task_info *task)
+{
+    task->send.ptr->length = 0;
+    task->send.ptr->offset = 0;
+    if (task->recv.ptr != task->send.ptr) {
+        task->recv.ptr->length = 0;
+        task->recv.ptr->offset = 0;
+    }
+}
+void sf_socket_close_connection(struct fast_task_info *task);
+void sf_recv_notify_read(int sock, const int event, void *arg);
 int sf_send_add_event(struct fast_task_info *task);
-int sf_client_sock_write(int sock, short event, void *arg);
-int sf_client_sock_read(int sock, short event, void *arg);
 void sf_task_finish_clean_up(struct fast_task_info *task);
@@ -87,6 +105,42 @@ void sf_task_switch_thread(struct fast_task_info *task,
 void sf_task_detach_thread(struct fast_task_info *task);
+static inline int sf_set_body_length(struct fast_task_info *task)
+{
+    if (SF_CTX->callbacks.set_body_length(task) != 0) {
+        return -1;
+    }
+    if (task->recv.ptr->length < 0) {
+        logError("file: "__FILE__", line: %d, "
+                "client ip: %s, pkg length: %d < 0",
+                __LINE__, task->client_ip,
+                task->recv.ptr->length);
+        return -1;
+    }
+    task->recv.ptr->length += SF_CTX->header_size;
+    if (task->recv.ptr->length > SF_NET_BUFFER_CFG.max_pkg_size) {
+        logError("file: "__FILE__", line: %d, "
+                "client ip: %s, pkg length: %d > "
+                "max pkg size: %d", __LINE__,
+                task->client_ip, task->recv.ptr->length,
+                SF_NET_BUFFER_CFG.max_pkg_size);
+        return -1;
+    }
+    return 0;
+}
+int sf_socket_async_connect_server(struct fast_task_info *task);
+int sf_socket_async_connect_check(struct fast_task_info *task);
+ssize_t sf_socket_send_data(struct fast_task_info *task,
+        SFCommAction *action, bool *send_done);
+ssize_t sf_socket_recv_data(struct fast_task_info *task,
+        const bool call_post_recv, SFCommAction *action);
+int sf_rdma_busy_polling_callback(struct nio_thread_data *thread_data);
 static inline int sf_nio_forward_request(struct fast_task_info *task,
         const int new_thread_index)
 {
@@ -94,11 +148,18 @@ static inline int sf_nio_forward_request(struct fast_task_info *task,
     return sf_nio_notify(task, SF_NIO_STAGE_FORWARDED);
 }
-static inline bool sf_client_sock_in_read_stage(struct fast_task_info *task)
+static inline void sf_nio_add_to_deleted_list(struct nio_thread_data
+        *thread_data, struct fast_task_info *task)
 {
-    return (task->event.callback == (IOEventCallback)sf_client_sock_read);
+    if (task->thread_data == thread_data) {
+        ioevent_add_to_deleted_list(task);
+    } else {
+        sf_nio_notify(task, SF_NIO_STAGE_CLOSE);
+    }
 }
+bool sf_client_sock_in_read_stage(struct fast_task_info *task);
 #ifdef __cplusplus
 }
 #endif


@@ -35,7 +35,7 @@
 #include "sf_ordered_writer.h"
 #define deal_binlog_one_record(writer, wb) \
-    sf_file_writer_deal_buffer(&(writer)->fw, &wb->bf, wb->version)
+    sf_file_writer_deal_versioned_buffer(&(writer)->fw, &wb->bf, wb->version)
 static inline int flush_writer_files(SFOrderedWriterInfo *writer)
 {
@@ -106,7 +106,7 @@ void sf_ordered_writer_finish(SFOrderedWriterContext *ctx)
 {
     int count;
-    if (ctx->writer.fw.file.name != NULL) {
+    if (ctx->writer.fw.file.name.str != NULL) {
         fc_queue_terminate(&ctx->thread.queues.version);
         count = 0;
@@ -120,8 +120,8 @@ void sf_ordered_writer_finish(SFOrderedWriterContext *ctx)
                     __LINE__, ctx->writer.fw.cfg.subdir_name);
         }
-        free(ctx->writer.fw.file.name);
-        ctx->writer.fw.file.name = NULL;
+        free(ctx->writer.fw.file.name.str);
+        ctx->writer.fw.file.name.str = NULL;
     }
     if (ctx->writer.fw.file.fd >= 0) {
@@ -142,8 +142,7 @@ static void *binlog_writer_func(void *arg)
 #ifdef OS_LINUX
     {
         char thread_name[64];
-        snprintf(thread_name, sizeof(thread_name),
-                "%s-writer", thread->name);
+        fc_combine_two_strings(thread->name, "writer", '-', thread_name);
         prctl(PR_SET_NAME, thread_name);
     }
 #endif
@@ -179,12 +178,18 @@ static int binlog_wbuffer_alloc_init(void *element, void *args)
     return 0;
 }
-static int compare_buffer_version(const SFOrderedWriterBuffer *entry1,
+static int push_compare_buffer_version(const SFOrderedWriterBuffer *entry1,
         const SFOrderedWriterBuffer *entry2)
 {
     return fc_compare_int64(entry1->version, entry2->version);
 }
+static int pop_compare_buffer_version(const SFOrderedWriterBuffer *entry,
+        const SFOrderedWriterBuffer *less_equal, void *arg)
+{
+    return fc_compare_int64(entry->version, less_equal->version);
+}
 static int sf_ordered_writer_init_thread(SFOrderedWriterContext *context,
         const char *name, const int max_record_size)
 {
@@ -197,7 +202,7 @@ static int sf_ordered_writer_init_thread(SFOrderedWriterContext *context,
     thread = &context->thread;
     writer = &context->writer;
-    snprintf(thread->name, sizeof(thread->name), "%s", name);
+    fc_safe_strcpy(thread->name, name);
     writer->fw.cfg.max_record_size = max_record_size;
     writer->thread = thread;
@@ -223,9 +228,11 @@ static int sf_ordered_writer_init_thread(SFOrderedWriterContext *context,
     }
     if ((result=sorted_queue_init(&thread->queues.buffer, (unsigned long)
-                    (&((SFOrderedWriterBuffer *)NULL)->next),
+                    (&((SFOrderedWriterBuffer *)NULL)->dlink),
                     (int (*)(const void *, const void *))
-                    compare_buffer_version)) != 0)
+                    push_compare_buffer_version,
+                    (int (*)(const void *, const void *, void *arg))
+                    pop_compare_buffer_version, NULL)) != 0)
     {
         return result;
     }
@@ -234,13 +241,16 @@ static int sf_ordered_writer_init_thread(SFOrderedWriterContext *context,
             context, SF_G_THREAD_STACK_SIZE);
 }
-int sf_ordered_writer_init(SFOrderedWriterContext *context,
+int sf_ordered_writer_init_ex(SFOrderedWriterContext *context,
         const char *data_path, const char *subdir_name,
-        const int buffer_size, const int max_record_size)
+        const char *file_prefix, const int buffer_size,
+        const int max_record_size, const int64_t file_rotate_size,
+        const bool call_fsync)
 {
     int result;
-    if ((result=sf_file_writer_init_normal(&context->writer.fw,
+    if ((result=sf_file_writer_init(&context->writer.fw, data_path,
-                    data_path, subdir_name, buffer_size)) != 0)
+                    subdir_name, file_prefix, max_record_size,
+                    buffer_size, file_rotate_size, call_fsync)) != 0)
     {
         return result;
     }


@@ -29,7 +29,7 @@ typedef struct sf_writer_version_entry {
 typedef struct sf_ordered_writer_buffer {
     int64_t version;
     BufferInfo bf;
-    struct sf_ordered_writer_buffer *next;
+    struct fc_list_head dlink;
 } SFOrderedWriterBuffer;
 typedef struct sf_orderd_writer_thread {
@@ -62,9 +62,17 @@ typedef struct sf_ordered_writer_context {
 extern "C" {
 #endif
-int sf_ordered_writer_init(SFOrderedWriterContext *context,
+int sf_ordered_writer_init_ex(SFOrderedWriterContext *context,
         const char *data_path, const char *subdir_name,
-        const int buffer_size, const int max_record_size);
+        const char *file_prefix, const int buffer_size,
+        const int max_record_size, const int64_t file_rotate_size,
+        const bool call_fsync);
+#define sf_ordered_writer_init(context, data_path, \
+        subdir_name, buffer_size, max_record_size) \
+    sf_ordered_writer_init_ex(context, data_path, subdir_name, \
+            SF_BINLOG_FILE_PREFIX_STR, buffer_size, max_record_size, \
+            SF_BINLOG_DEFAULT_ROTATE_SIZE, true)
 #define sf_ordered_writer_set_flags(ctx, flags) \
     sf_file_writer_set_flags(&(ctx)->writer.fw, flags)
@@ -117,10 +125,11 @@ static inline SFOrderedWriterBuffer *sf_ordered_writer_alloc_buffer(
 #define sf_ordered_writer_set_binlog_index(ctx, binlog_index) \
     sf_file_writer_set_binlog_index(&(ctx)->writer.fw, binlog_index)
-#define sf_push_to_binlog_thread_queue(ctx, buffer) \
+#define sf_ordered_writer_push_to_thread_queue(ctx, buffer) \
     sorted_queue_push(&(ctx)->thread.queues.buffer, buffer)
-static inline void sf_push_to_binlog_write_queue(SFOrderedWriterContext *ctx,
+static inline void sf_ordered_writer_push_to_queue(
+        SFOrderedWriterContext *ctx,
         SFOrderedWriterBuffer *buffer)
 {
     sorted_queue_push(&ctx->thread.queues.buffer, buffer);
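/*
 * Example (not part of the diff): a sketch of calling the extended
 * initializer directly instead of the sf_ordered_writer_init() wrapper
 * macro; the data path, subdir name and sizes below are illustrative only.
 */
static int init_binlog_writer_example(SFOrderedWriterContext *ctx)
{
    /* explicit file prefix, rotate size and fsync policy */
    return sf_ordered_writer_init_ex(ctx, "/opt/fastcfs/data", "binlog",
            SF_BINLOG_FILE_PREFIX_STR, 64 * 1024, 1024,
            SF_BINLOG_DEFAULT_ROTATE_SIZE, true);
}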


@@ -14,8 +14,6 @@
  */
-#include <errno.h>
-#include "fastcommon/shared_func.h"
 #include "sf_util.h"
 #include "sf_nio.h"
 #include "sf_proto.h"
@ -29,19 +27,23 @@ static int64_t log_slower_than_us = 0;
int sf_proto_set_body_length(struct fast_task_info *task) int sf_proto_set_body_length(struct fast_task_info *task)
{ {
SFCommonProtoHeader *header; SFCommonProtoHeader *header;
char formatted_ip[FORMATTED_IP_SIZE];
header = (SFCommonProtoHeader *)task->data; header = (SFCommonProtoHeader *)task->recv.ptr->data;
if (!SF_PROTO_CHECK_MAGIC(header->magic)) { if (!SF_PROTO_CHECK_MAGIC(header->magic)) {
format_ip_address(task->client_ip, formatted_ip);
logError("file: "__FILE__", line: %d, " logError("file: "__FILE__", line: %d, "
"peer %s:%u, magic "SF_PROTO_MAGIC_FORMAT "%s peer %s:%u, magic "SF_PROTO_MAGIC_FORMAT" is invalid, "
" is invalid, expect: "SF_PROTO_MAGIC_FORMAT, "expect: "SF_PROTO_MAGIC_FORMAT", cmd: %d, body length: %d",
__LINE__, task->client_ip, task->port, __LINE__, (task->handler != NULL ? task->handler->fh->ctx->
name : ""), formatted_ip, task->port,
SF_PROTO_MAGIC_PARAMS(header->magic), SF_PROTO_MAGIC_PARAMS(header->magic),
SF_PROTO_MAGIC_EXPECT_PARAMS); SF_PROTO_MAGIC_EXPECT_PARAMS, header->cmd,
buff2int(header->body_len));
return EINVAL; return EINVAL;
} }
task->length = buff2int(header->body_len); //set body length task->recv.ptr->length = buff2int(header->body_len); //set body length
return 0; return 0;
} }
@ -70,8 +72,14 @@ int sf_check_response(ConnectionInfo *conn, SFResponseInfo *response,
response->error.length = response->header.body_len; response->error.length = response->header.body_len;
} }
if ((result=tcprecvdata_nb_ex(conn->sock, response->error.message, if (conn->comm_type == fc_comm_type_rdma) {
response->error.length, network_timeout, &recv_bytes)) == 0) memcpy(response->error.message, G_RDMA_CONNECTION_CALLBACKS.
get_recv_buffer(conn)->buff + sizeof(SFCommonProtoHeader),
response->error.length);
response->error.message[response->error.length] = '\0';
} else if ((result=tcprecvdata_nb_ex(conn->sock, response->
error.message, response->error.length,
network_timeout, &recv_bytes)) == 0)
{ {
response->error.message[response->error.length] = '\0'; response->error.message[response->error.length] = '\0';
} else { } else {
@ -96,8 +104,37 @@ static inline int sf_recv_response_header(ConnectionInfo *conn,
SFResponseInfo *response, const int network_timeout) SFResponseInfo *response, const int network_timeout)
{ {
int result; int result;
BufferInfo *buffer;
SFCommonProtoHeader header_proto; SFCommonProtoHeader header_proto;
if (conn->comm_type == fc_comm_type_rdma) {
buffer = G_RDMA_CONNECTION_CALLBACKS.get_recv_buffer(conn);
if (buffer->length < sizeof(SFCommonProtoHeader)) {
response->error.length = sprintf(response->error.message,
"recv pkg length: %d < header size: %d",
buffer->length, (int)sizeof(SFCommonProtoHeader));
return EINVAL;
}
if ((result=sf_proto_parse_header((SFCommonProtoHeader *)
buffer->buff, response)) != 0)
{
return result;
}
if (buffer->length != (sizeof(SFCommonProtoHeader) +
response->header.body_len))
{
response->error.length = snprintf(response->error.message,
sizeof(response->error.message),
"recv package length: %d != calculate: %d",
buffer->length, (int)(sizeof(SFCommonProtoHeader) +
response->header.body_len));
return EINVAL;
}
return 0;
} else {
if ((result=tcprecvdata_nb(conn->sock, &header_proto, if ((result=tcprecvdata_nb(conn->sock, &header_proto,
sizeof(SFCommonProtoHeader), network_timeout)) != 0) sizeof(SFCommonProtoHeader), network_timeout)) != 0)
{ {
@ -107,19 +144,8 @@ static inline int sf_recv_response_header(ConnectionInfo *conn,
result, STRERROR(result)); result, STRERROR(result));
return result; return result;
} }
return sf_proto_parse_header(&header_proto, response);
if (!SF_PROTO_CHECK_MAGIC(header_proto.magic)) {
response->error.length = snprintf(response->error.message,
sizeof(response->error.message),
"magic "SF_PROTO_MAGIC_FORMAT" is invalid, "
"expect: "SF_PROTO_MAGIC_FORMAT,
SF_PROTO_MAGIC_PARAMS(header_proto.magic),
SF_PROTO_MAGIC_EXPECT_PARAMS);
return EINVAL;
} }
sf_proto_extract_header(&header_proto, &response->header);
return 0;
} }
int sf_send_and_recv_response_header(ConnectionInfo *conn, char *data, int sf_send_and_recv_response_header(ConnectionInfo *conn, char *data,
@ -127,11 +153,9 @@ int sf_send_and_recv_response_header(ConnectionInfo *conn, char *data,
{ {
int result; int result;
if ((result=tcpsenddata_nb(conn->sock, data, len, network_timeout)) != 0) { if ((result=sf_proto_send_buf1(conn, data, len,
response->error.length = snprintf(response->error.message, response, network_timeout)) != 0)
sizeof(response->error.message), {
"send data fail, errno: %d, error info: %s",
result, STRERROR(result));
return result; return result;
} }
@ -194,7 +218,10 @@ int sf_send_and_recv_response_ex(ConnectionInfo *conn, char *send_data,
return 0; return 0;
} }
if ((result=tcprecvdata_nb_ex(conn->sock, recv_data, response-> if (conn->comm_type == fc_comm_type_rdma) {
memcpy(recv_data, G_RDMA_CONNECTION_CALLBACKS.get_recv_buffer(conn)->
buff + sizeof(SFCommonProtoHeader), response->header.body_len);
} else if ((result=tcprecvdata_nb_ex(conn->sock, recv_data, response->
header.body_len, network_timeout, &recv_bytes)) != 0) header.body_len, network_timeout, &recv_bytes)) != 0)
{ {
response->error.length = snprintf(response->error.message, response->error.length = snprintf(response->error.message,
@ -234,7 +261,11 @@ int sf_send_and_recv_response_ex1(ConnectionInfo *conn, char *send_data,
return EOVERFLOW; return EOVERFLOW;
} }
if ((result=tcprecvdata_nb_ex(conn->sock, recv_data, response-> if (conn->comm_type == fc_comm_type_rdma) {
memcpy(recv_data, G_RDMA_CONNECTION_CALLBACKS.get_recv_buffer(conn)->
buff + sizeof(SFCommonProtoHeader), response->header.body_len);
*body_len = response->header.body_len;
} else if ((result=tcprecvdata_nb_ex(conn->sock, recv_data, response->
header.body_len, network_timeout, body_len)) != 0) header.body_len, network_timeout, body_len)) != 0)
{ {
response->error.length = snprintf(response->error.message, response->error.length = snprintf(response->error.message,
@ -275,7 +306,10 @@ int sf_recv_response(ConnectionInfo *conn, SFResponseInfo *response,
return 0; return 0;
} }
if ((result=tcprecvdata_nb_ex(conn->sock, recv_data, expect_body_len, if (conn->comm_type == fc_comm_type_rdma) {
memcpy(recv_data, G_RDMA_CONNECTION_CALLBACKS.get_recv_buffer(conn)->
buff + sizeof(SFCommonProtoHeader), response->header.body_len);
} else if ((result=tcprecvdata_nb_ex(conn->sock, recv_data, expect_body_len,
network_timeout, &recv_bytes)) != 0) network_timeout, &recv_bytes)) != 0)
{ {
response->error.length = snprintf(response->error.message, response->error.length = snprintf(response->error.message,
@ -343,7 +377,10 @@ int sf_recv_vary_response(ConnectionInfo *conn, SFResponseInfo *response,
buffer->alloc_size = alloc_size; buffer->alloc_size = alloc_size;
} }
if ((result=tcprecvdata_nb_ex(conn->sock, buffer->buff, response-> if (conn->comm_type == fc_comm_type_rdma) {
memcpy(buffer->buff, G_RDMA_CONNECTION_CALLBACKS.get_recv_buffer(conn)->
buff + sizeof(SFCommonProtoHeader), response->header.body_len);
} else if ((result=tcprecvdata_nb_ex(conn->sock, buffer->buff, response->
header.body_len, network_timeout, &recv_bytes)) != 0) header.body_len, network_timeout, &recv_bytes)) != 0)
{ {
response->error.length = snprintf(response->error.message, response->error.length = snprintf(response->error.message,
@ -364,13 +401,9 @@ int sf_send_and_recv_vary_response(ConnectionInfo *conn,
{ {
int result; int result;
if ((result=tcpsenddata_nb(conn->sock, send_data, if ((result=sf_proto_send_buf1(conn, send_data, send_len,
send_len, network_timeout)) != 0) response, network_timeout)) != 0)
{ {
response->error.length = snprintf(response->error.message,
sizeof(response->error.message),
"send data fail, errno: %d, error info: %s",
result, STRERROR(result));
return result; return result;
} }
@ -411,6 +444,10 @@ const char *sf_get_cmd_caption(const int cmd)
return "GET_LEADER_REQ"; return "GET_LEADER_REQ";
case SF_SERVICE_PROTO_GET_LEADER_RESP: case SF_SERVICE_PROTO_GET_LEADER_RESP:
return "GET_LEADER_RESP"; return "GET_LEADER_RESP";
case SF_CLUSTER_PROTO_GET_SERVER_STATUS_REQ:
return "GET_SERVER_STATUS_REQ";
case SF_CLUSTER_PROTO_GET_SERVER_STATUS_RESP:
return "GET_SERVER_STATUS_RESP";
default: default:
return "UNKOWN"; return "UNKOWN";
} }
@ -419,14 +456,17 @@ const char *sf_get_cmd_caption(const int cmd)
int sf_proto_deal_ack(struct fast_task_info *task, int sf_proto_deal_ack(struct fast_task_info *task,
SFRequestInfo *request, SFResponseInfo *response) SFRequestInfo *request, SFResponseInfo *response)
{ {
char formatted_ip[FORMATTED_IP_SIZE];
if (request->header.status != 0) { if (request->header.status != 0) {
if (request->header.body_len > 0) { if (request->header.body_len > 0) {
int remain_size; int remain_size;
int len; int len;
format_ip_address(task->client_ip, formatted_ip);
response->error.length = sprintf(response->error.message, response->error.length = sprintf(response->error.message,
"message from peer %s:%u => ", "message from peer %s:%u => ",
task->client_ip, task->port); formatted_ip, task->port);
remain_size = sizeof(response->error.message) - remain_size = sizeof(response->error.message) -
response->error.length; response->error.length;
if (request->header.body_len >= remain_size) { if (request->header.body_len >= remain_size) {
@ -454,7 +494,8 @@ int sf_proto_deal_ack(struct fast_task_info *task,
} }
int sf_proto_rebind_idempotency_channel(ConnectionInfo *conn, int sf_proto_rebind_idempotency_channel(ConnectionInfo *conn,
const uint32_t channel_id, const int key, const int network_timeout) const char *service_name, const uint32_t channel_id,
const int key, const int network_timeout)
{ {
char out_buff[sizeof(SFCommonProtoHeader) + char out_buff[sizeof(SFCommonProtoHeader) +
sizeof(SFProtoRebindChannelReq)]; sizeof(SFProtoRebindChannelReq)];
@ -474,19 +515,20 @@ int sf_proto_rebind_idempotency_channel(ConnectionInfo *conn,
sizeof(out_buff), &response, network_timeout, sizeof(out_buff), &response, network_timeout,
SF_SERVICE_PROTO_REBIND_CHANNEL_RESP)) != 0) SF_SERVICE_PROTO_REBIND_CHANNEL_RESP)) != 0)
{ {
sf_log_network_error(&response, conn, result); sf_log_network_error(&response, conn, service_name, result);
} }
return result; return result;
} }
int sf_proto_get_group_servers(ConnectionInfo *conn, int sf_proto_get_group_servers(ConnectionInfo *conn,
const int network_timeout, const int group_id, const char *service_name, const int network_timeout,
SFGroupServerArray *sarray) const int group_id, SFGroupServerArray *sarray)
{ {
char out_buff[sizeof(SFCommonProtoHeader) + char out_buff[sizeof(SFCommonProtoHeader) +
sizeof(SFProtoGetGroupServersReq)]; sizeof(SFProtoGetGroupServersReq)];
char in_buff[1024]; char in_buff[1024];
char formatted_ip[FORMATTED_IP_SIZE];
SFCommonProtoHeader *header; SFCommonProtoHeader *header;
SFProtoGetGroupServersReq *req; SFProtoGetGroupServersReq *req;
SFProtoGetGroupServersRespBodyHeader *body_header; SFProtoGetGroupServersRespBodyHeader *body_header;
@ -509,14 +551,15 @@ int sf_proto_get_group_servers(ConnectionInfo *conn,
SF_SERVICE_PROTO_GET_GROUP_SERVERS_RESP, in_buff, SF_SERVICE_PROTO_GET_GROUP_SERVERS_RESP, in_buff,
sizeof(in_buff), &body_len)) != 0) sizeof(in_buff), &body_len)) != 0)
{ {
sf_log_network_error(&response, conn, result); sf_log_network_error(&response, conn, service_name, result);
return result; return result;
} }
if (body_len < sizeof(SFProtoGetGroupServersRespBodyHeader)) { if (body_len < sizeof(SFProtoGetGroupServersRespBodyHeader)) {
format_ip_address(conn->ip_addr, formatted_ip);
logError("file: "__FILE__", line: %d, " logError("file: "__FILE__", line: %d, "
"server %s:%d response body length: %d < %d", "server %s:%u response body length: %d < %d",
__LINE__, conn->ip_addr, conn->port, body_len, __LINE__, formatted_ip, conn->port, body_len,
(int)sizeof(SFProtoGetGroupServersRespBodyHeader)); (int)sizeof(SFProtoGetGroupServersRespBodyHeader));
return EINVAL; return EINVAL;
} }
@ -524,15 +567,17 @@ int sf_proto_get_group_servers(ConnectionInfo *conn,
body_header = (SFProtoGetGroupServersRespBodyHeader *)in_buff; body_header = (SFProtoGetGroupServersRespBodyHeader *)in_buff;
count = buff2int(body_header->count); count = buff2int(body_header->count);
if (count <= 0) { if (count <= 0) {
format_ip_address(conn->ip_addr, formatted_ip);
logError("file: "__FILE__", line: %d, " logError("file: "__FILE__", line: %d, "
"server %s:%d response server count: %d <= 0", "server %s:%u response server count: %d <= 0",
__LINE__, conn->ip_addr, conn->port, count); __LINE__, formatted_ip, conn->port, count);
return EINVAL; return EINVAL;
} }
if (count > sarray->alloc) { if (count > sarray->alloc) {
format_ip_address(conn->ip_addr, formatted_ip);
logError("file: "__FILE__", line: %d, " logError("file: "__FILE__", line: %d, "
"server %s:%d response server count: %d is too large, " "server %s:%u response server count: %d is too large, "
"exceeds %d", __LINE__, conn->ip_addr, conn->port, "exceeds %d", __LINE__, formatted_ip, conn->port,
count, sarray->alloc); count, sarray->alloc);
return EOVERFLOW; return EOVERFLOW;
} }
@ -549,9 +594,8 @@ int sf_proto_get_group_servers(ConnectionInfo *conn,
return 0; return 0;
} }
int sf_proto_get_leader(ConnectionInfo *conn, int sf_proto_get_leader(ConnectionInfo *conn, const char *service_name,
const int network_timeout, const int network_timeout, SFClientServerEntry *leader)
SFClientServerEntry *leader)
{ {
int result; int result;
SFCommonProtoHeader *header; SFCommonProtoHeader *header;
@ -567,12 +611,13 @@ int sf_proto_get_leader(ConnectionInfo *conn,
SF_SERVICE_PROTO_GET_LEADER_RESP, (char *)&server_resp, SF_SERVICE_PROTO_GET_LEADER_RESP, (char *)&server_resp,
sizeof(SFProtoGetServerResp))) != 0) sizeof(SFProtoGetServerResp))) != 0)
{ {
sf_log_network_error(&response, conn, result); sf_log_network_error(&response, conn, service_name, result);
} else { } else {
leader->server_id = buff2int(server_resp.server_id); leader->server_id = buff2int(server_resp.server_id);
memcpy(leader->conn.ip_addr, server_resp.ip_addr, IP_ADDRESS_SIZE); memcpy(leader->conn.ip_addr, server_resp.ip_addr, IP_ADDRESS_SIZE);
*(leader->conn.ip_addr + IP_ADDRESS_SIZE - 1) = '\0'; *(leader->conn.ip_addr + IP_ADDRESS_SIZE - 1) = '\0';
leader->conn.port = buff2short(server_resp.port); leader->conn.port = buff2short(server_resp.port);
leader->conn.comm_type = conn->comm_type;
} }
return result; return result;
@ -585,20 +630,22 @@ void sf_proto_set_handler_context(const SFHandlerContext *ctx)
} }
int sf_proto_deal_task_done(struct fast_task_info *task, int sf_proto_deal_task_done(struct fast_task_info *task,
SFCommonTaskContext *ctx) const char *service_name, SFCommonTaskContext *ctx)
{ {
SFCommonProtoHeader *proto_header; SFCommonProtoHeader *proto_header;
int status; int status;
int r; int r;
int64_t time_used; int64_t time_used;
int log_level; int log_level;
char formatted_ip[FORMATTED_IP_SIZE];
char time_buff[32]; char time_buff[32];
if (ctx->log_level != LOG_NOTHING && ctx->response.error.length > 0) { if (ctx->log_level != LOG_NOTHING && ctx->response.error.length > 0) {
log_it_ex(&g_log_context, ctx->log_level, log_it_ex(&g_log_context, ctx->log_level,
"file: "__FILE__", line: %d, " "file: "__FILE__", line: %d, %s "
"peer %s:%u, cmd: %d (%s), req body length: %d, " "peer %s:%u, cmd: %d (%s), req body length: %d, "
"resp status: %d, %s", __LINE__, task->client_ip, "resp status: %d, %s", __LINE__, service_name,
format_ip_address(task->client_ip, formatted_ip),
task->port, ctx->request.header.cmd, task->port, ctx->request.header.cmd,
GET_CMD_CAPTION(ctx->request.header.cmd), GET_CMD_CAPTION(ctx->request.header.cmd),
ctx->request.header.body_len, ctx->response.header.status, ctx->request.header.body_len, ctx->response.header.status,
@ -610,36 +657,38 @@ int sf_proto_deal_task_done(struct fast_task_info *task,
time_used = get_current_time_us() - ctx->req_start_time; time_used = get_current_time_us() - ctx->req_start_time;
log_level = GET_CMD_LOG_LEVEL(ctx->request.header.cmd); log_level = GET_CMD_LOG_LEVEL(ctx->request.header.cmd);
log_it_ex(&g_log_context, log_level, "file: "__FILE__", line: %d, " log_it_ex(&g_log_context, log_level, "file: "__FILE__", line: %d, "
"client %s:%u, req cmd: %d (%s), req body_len: %d, " "%s client %s:%u, req cmd: %d (%s), req body_len: %d, "
"resp status: %d, time used: %s us", __LINE__, "resp status: %d, time used: %s us", __LINE__, service_name,
task->client_ip, task->port, ctx->request.header.cmd, format_ip_address(task->client_ip, formatted_ip),
task->port, ctx->request.header.cmd,
GET_CMD_CAPTION(ctx->request.header.cmd), GET_CMD_CAPTION(ctx->request.header.cmd),
ctx->request.header.body_len, ctx->response.header.status, ctx->request.header.body_len, ctx->response.header.status,
long_to_comma_str(time_used, time_buff)); long_to_comma_str(time_used, time_buff));
} }
if (ctx->response.header.status == 0) { if (ctx->response.header.status == 0) {
task->offset = task->length = 0;
return sf_set_read_event(task); return sf_set_read_event(task);
} else { } else {
return FC_NEGATIVE(ctx->response.header.status); return FC_NEGATIVE(ctx->response.header.status);
} }
} }
proto_header = (SFCommonProtoHeader *)task->data; proto_header = (SFCommonProtoHeader *)task->send.ptr->data;
if (!ctx->response_done) { if (!ctx->response_done) {
ctx->response.header.body_len = ctx->response.error.length; ctx->response.header.body_len = ctx->response.error.length;
if (ctx->response.error.length > 0) { if (ctx->response.error.length > 0) {
memcpy(task->data + sizeof(SFCommonProtoHeader), memcpy(task->send.ptr->data + sizeof(SFCommonProtoHeader),
ctx->response.error.message, ctx->response.error.length); ctx->response.error.message, ctx->response.error.length);
} }
} }
status = sf_unify_errno(FC_ABS(ctx->response.header.status)); status = sf_unify_errno(FC_ABS(ctx->response.header.status));
short2buff(status, proto_header->status); short2buff(status, proto_header->status);
short2buff(ctx->response.header.flags, proto_header->flags);
proto_header->cmd = ctx->response.header.cmd; proto_header->cmd = ctx->response.header.cmd;
int2buff(ctx->response.header.body_len, proto_header->body_len); int2buff(ctx->response.header.body_len, proto_header->body_len);
task->length = sizeof(SFCommonProtoHeader) + ctx->response.header.body_len; task->send.ptr->length = sizeof(SFCommonProtoHeader) +
ctx->response.header.body_len;
r = sf_send_add_event(task); r = sf_send_add_event(task);
time_used = get_current_time_us() - ctx->req_start_time; time_used = get_current_time_us() - ctx->req_start_time;
@ -649,10 +698,11 @@ int sf_proto_deal_task_done(struct fast_task_info *task,
char buff[256]; char buff[256];
int blen; int blen;
blen = sprintf(buff, "timed used: %s us, client %s:%u, " blen = sprintf(buff, "timed used: %s us, %s client %s:%u, "
"req cmd: %d (%s), req body len: %d, resp cmd: %d (%s), " "req cmd: %d (%s), req body len: %d, resp cmd: %d (%s), "
"status: %d, resp body len: %d", long_to_comma_str(time_used, "status: %d, resp body len: %d", long_to_comma_str(time_used,
time_buff), task->client_ip, task->port, ctx->request. time_buff), service_name, format_ip_address(task->
client_ip, formatted_ip), task->port, ctx->request.
header.cmd, GET_CMD_CAPTION(ctx->request.header.cmd), header.cmd, GET_CMD_CAPTION(ctx->request.header.cmd),
ctx->request.header.body_len, ctx->response.header.cmd, ctx->request.header.body_len, ctx->response.header.cmd,
GET_CMD_CAPTION(ctx->response.header.cmd), GET_CMD_CAPTION(ctx->response.header.cmd),
@ -663,10 +713,11 @@ int sf_proto_deal_task_done(struct fast_task_info *task,
if (sf_handler_ctx.callbacks.get_cmd_log_level != NULL) { if (sf_handler_ctx.callbacks.get_cmd_log_level != NULL) {
log_level = GET_CMD_LOG_LEVEL(ctx->request.header.cmd); log_level = GET_CMD_LOG_LEVEL(ctx->request.header.cmd);
log_it_ex(&g_log_context, log_level, "file: "__FILE__", line: %d, " log_it_ex(&g_log_context, log_level, "file: "__FILE__", line: %d, "
"client %s:%u, req cmd: %d (%s), req body_len: %d, " "%s client %s:%u, req cmd: %d (%s), req body_len: %d, "
"resp cmd: %d (%s), status: %d, resp body_len: %d, " "resp cmd: %d (%s), status: %d, resp body_len: %d, "
"time used: %s us", __LINE__, "time used: %s us", __LINE__, service_name,
task->client_ip, task->port, ctx->request.header.cmd, format_ip_address(task->client_ip, formatted_ip),
task->port, ctx->request.header.cmd,
GET_CMD_CAPTION(ctx->request.header.cmd), GET_CMD_CAPTION(ctx->request.header.cmd),
ctx->request.header.body_len, ctx->response.header.cmd, ctx->request.header.body_len, ctx->response.header.cmd,
GET_CMD_CAPTION(ctx->response.header.cmd), GET_CMD_CAPTION(ctx->response.header.cmd),


@ -48,6 +48,10 @@
#define SF_SERVICE_PROTO_REPORT_REQ_RECEIPT_REQ 125 #define SF_SERVICE_PROTO_REPORT_REQ_RECEIPT_REQ 125
#define SF_SERVICE_PROTO_REPORT_REQ_RECEIPT_RESP 126 #define SF_SERVICE_PROTO_REPORT_REQ_RECEIPT_RESP 126
#define SF_CLUSTER_PROTO_GET_SERVER_STATUS_REQ 201
#define SF_CLUSTER_PROTO_GET_SERVER_STATUS_RESP 202
#define SF_PROTO_MAGIC_CHAR '@' #define SF_PROTO_MAGIC_CHAR '@'
#define SF_PROTO_SET_MAGIC(m) \ #define SF_PROTO_SET_MAGIC(m) \
m[0] = m[1] = m[2] = m[3] = SF_PROTO_MAGIC_CHAR m[0] = m[1] = m[2] = m[3] = SF_PROTO_MAGIC_CHAR
@ -64,19 +68,17 @@
#define SF_PROTO_MAGIC_PARAMS(m) \ #define SF_PROTO_MAGIC_PARAMS(m) \
m[0], m[1], m[2], m[3] m[0], m[1], m[2], m[3]
#define SF_PROTO_SET_HEADER(header, _cmd, _body_len) \ #define SF_PROTO_SET_HEADER_EX(header, _cmd, _flags, _body_len) \
do { \ do { \
SF_PROTO_SET_MAGIC((header)->magic); \ SF_PROTO_SET_MAGIC((header)->magic); \
(header)->cmd = _cmd; \ (header)->cmd = _cmd; \
(header)->status[0] = (header)->status[1] = 0; \ (header)->status[0] = (header)->status[1] = 0; \
short2buff(_flags, (header)->flags); \
int2buff(_body_len, (header)->body_len); \ int2buff(_body_len, (header)->body_len); \
} while (0) } while (0)
#define SF_PROTO_SET_HEADER_EX(header, _cmd, _flags, _body_len) \ #define SF_PROTO_SET_HEADER(header, _cmd, _body_len) \
do { \ SF_PROTO_SET_HEADER_EX(header, _cmd, 0, _body_len)
SF_PROTO_SET_HEADER(header, _cmd, _body_len); \
short2buff(_flags, (header)->flags); \
} while (0)
#define SF_PROTO_SET_RESPONSE_HEADER(proto_header, resp_header) \ #define SF_PROTO_SET_RESPONSE_HEADER(proto_header, resp_header) \
do { \ do { \
@ -85,22 +87,32 @@
int2buff((resp_header).body_len, (proto_header)->body_len);\ int2buff((resp_header).body_len, (proto_header)->body_len);\
} while (0) } while (0)
#define SF_PROTO_RESP_BODY(task) \
(task->data + sizeof(SFCommonProtoHeader)) #define SF_PROTO_SEND_BODY(task) \
(task->send.ptr->data + sizeof(SFCommonProtoHeader))
#define SF_PROTO_RECV_BODY(task) \
(task->recv.ptr->data + sizeof(SFCommonProtoHeader))
#define SF_RECV_BODY_LENGTH(task) \
(task->recv.ptr->length - sizeof(SFCommonProtoHeader))
#define SF_SEND_BUFF_END(task) (task->send.ptr->data + task->send.ptr->size)
#define SF_RECV_BUFF_END(task) (task->recv.ptr->data + task->recv.ptr->size)
#define SF_PROTO_UPDATE_EXTRA_BODY_SIZE \ #define SF_PROTO_UPDATE_EXTRA_BODY_SIZE \
sizeof(SFProtoIdempotencyAdditionalHeader) + FCFS_AUTH_SESSION_ID_LEN sizeof(SFProtoIdempotencyAdditionalHeader) + FCFS_AUTH_SESSION_ID_LEN
#define SF_PROTO_QUERY_EXTRA_BODY_SIZE FCFS_AUTH_SESSION_ID_LEN #define SF_PROTO_QUERY_EXTRA_BODY_SIZE FCFS_AUTH_SESSION_ID_LEN
#define SF_PROTO_CLIENT_SET_REQ(client_ctx, out_buff, \ #define SF_PROTO_CLIENT_SET_REQ_EX(client_ctx, auth_enabled, \
header, req, the_req_id, out_bytes) \ out_buff, header, req, the_req_id, out_bytes) \
do { \ do { \
char *the_req_start; \ char *the_req_start; \
header = (SFCommonProtoHeader *)out_buff; \ header = (SFCommonProtoHeader *)out_buff; \
the_req_start = (char *)(header + 1); \ the_req_start = (char *)(header + 1); \
out_bytes = sizeof(SFCommonProtoHeader) + sizeof(*req); \ out_bytes = sizeof(SFCommonProtoHeader) + sizeof(*req); \
if (client_ctx->auth.enabled) { \ if (auth_enabled) { \
out_bytes += FCFS_AUTH_SESSION_ID_LEN; \ out_bytes += FCFS_AUTH_SESSION_ID_LEN; \
memcpy(the_req_start, client_ctx->auth.ctx-> \ memcpy(the_req_start, client_ctx->auth.ctx-> \
session.id, FCFS_AUTH_SESSION_ID_LEN); \ session.id, FCFS_AUTH_SESSION_ID_LEN); \
@ -117,6 +129,10 @@
} \ } \
} while (0) } while (0)
#define SF_PROTO_CLIENT_SET_REQ(client_ctx, out_buff, \
header, req, the_req_id, out_bytes) \
SF_PROTO_CLIENT_SET_REQ_EX(client_ctx, client_ctx->auth.enabled, \
out_buff, header, req, the_req_id, out_bytes)
typedef struct sf_common_proto_header { typedef struct sf_common_proto_header {
unsigned char magic[4]; //magic number unsigned char magic[4]; //magic number
@ -172,8 +188,8 @@ typedef struct sf_proto_setup_channel_req {
typedef struct sf_proto_setup_channel_resp { typedef struct sf_proto_setup_channel_resp {
char channel_id[4]; char channel_id[4];
char key[4]; char key[4];
char server_id[4];
char buffer_size[4]; char buffer_size[4];
char padding[4];
} SFProtoSetupChannelResp; } SFProtoSetupChannelResp;
typedef struct sf_proto_rebind_channel_req { typedef struct sf_proto_rebind_channel_req {
@ -190,6 +206,33 @@ typedef struct sf_proto_report_req_receipt_body {
char req_id[8]; char req_id[8];
} SFProtoReportReqReceiptBody; } SFProtoReportReqReceiptBody;
typedef struct {
unsigned char servers[SF_CLUSTER_CONFIG_SIGN_LEN];
unsigned char cluster[SF_CLUSTER_CONFIG_SIGN_LEN];
} SFProtoConfigSigns;
typedef struct sf_proto_get_server_status_req {
SFProtoConfigSigns config_signs;
char server_id[4]; //my server id
union {
char is_leader;
char is_master;
};
char auth_enabled;
char padding[2];
} SFProtoGetServerStatusReq;
typedef struct sf_get_server_status_request {
const unsigned char *servers_sign;
const unsigned char *cluster_sign;
int server_id; //my server id
union {
bool is_leader;
bool is_master;
};
bool auth_enabled;
} SFGetServerStatusRequest;
typedef struct sf_group_server_info { typedef struct sf_group_server_info {
int id; int id;
bool is_leader; bool is_leader;
@ -245,7 +288,17 @@ int sf_proto_set_body_length(struct fast_task_info *task);
const char *sf_get_cmd_caption(const int cmd); const char *sf_get_cmd_caption(const int cmd);
int sf_proto_deal_task_done(struct fast_task_info *task, int sf_proto_deal_task_done(struct fast_task_info *task,
SFCommonTaskContext *ctx); const char *service_name, SFCommonTaskContext *ctx);
static inline void sf_proto_init_task_magic(struct fast_task_info *task)
{
SF_PROTO_SET_MAGIC(((SFCommonProtoHeader *)
task->send.ptr->data)->magic);
if (task->recv.ptr != task->send.ptr) {
SF_PROTO_SET_MAGIC(((SFCommonProtoHeader *)
task->recv.ptr->data)->magic);
}
}
static inline void sf_proto_init_task_context(struct fast_task_info *task, static inline void sf_proto_init_task_context(struct fast_task_info *task,
SFCommonTaskContext *ctx) SFCommonTaskContext *ctx)
@ -254,58 +307,180 @@ static inline void sf_proto_init_task_context(struct fast_task_info *task,
ctx->response.header.cmd = SF_PROTO_ACK; ctx->response.header.cmd = SF_PROTO_ACK;
ctx->response.header.body_len = 0; ctx->response.header.body_len = 0;
ctx->response.header.status = 0; ctx->response.header.status = 0;
ctx->response.header.flags = 0;
ctx->response.error.length = 0; ctx->response.error.length = 0;
ctx->response.error.message[0] = '\0'; ctx->response.error.message[0] = '\0';
ctx->log_level = LOG_ERR; ctx->log_level = LOG_ERR;
ctx->response_done = false; ctx->response_done = false;
ctx->need_response = true; ctx->need_response = true;
ctx->request.header.cmd = ((SFCommonProtoHeader *)task->data)->cmd; ctx->request.header.cmd = ((SFCommonProtoHeader *)
ctx->request.header.body_len = task->length - sizeof(SFCommonProtoHeader); task->recv.ptr->data)->cmd;
ctx->request.header.body_len = SF_RECV_BODY_LENGTH(task);
ctx->request.header.status = buff2short(((SFCommonProtoHeader *) ctx->request.header.status = buff2short(((SFCommonProtoHeader *)
task->data)->status); task->recv.ptr->data)->status);
ctx->request.body = task->data + sizeof(SFCommonProtoHeader); ctx->request.header.flags = buff2short(((SFCommonProtoHeader *)
task->recv.ptr->data)->flags);
if (task->recv_body != NULL) {
ctx->request.body = task->recv_body;
} else {
ctx->request.body = SF_PROTO_RECV_BODY(task);
}
} }
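/*
 * Example (not part of the diff): a sketch of a request handler built from
 * sf_proto_init_task_context() and sf_proto_deal_task_done(). The service
 * name "myserver" and the dispatch body are illustrative, and the exact
 * deal_task callback signature expected by the framework is not shown here.
 */
static int my_deal_task_example(struct fast_task_info *task)
{
    SFCommonTaskContext ctx;

    sf_proto_init_task_context(task, &ctx);
    ctx.req_start_time = get_current_time_us();  /* used for slow-log timing */
    switch (ctx.request.header.cmd) {
        /* dispatch on the request command and fill ctx.response here */
        default:
            ctx.response.error.length = sprintf(ctx.response.error.message,
                    "unknown cmd: %d", ctx.request.header.cmd);
            ctx.response.header.status = EINVAL;
            break;
    }
    return sf_proto_deal_task_done(task, "myserver", &ctx);
}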
/* task send and recv buffer operations */
static inline int sf_set_task_send_buffer_size(
struct fast_task_info *task, const int expect_size)
{
int result;
if ((result=free_queue_set_buffer_size(task, task->send.ptr,
expect_size)) == 0)
{
SF_PROTO_SET_MAGIC(((SFCommonProtoHeader *)
task->send.ptr->data)->magic);
}
return result;
}
static inline int sf_set_task_recv_buffer_size(
struct fast_task_info *task, const int expect_size)
{
int result;
if ((result=free_queue_set_buffer_size(task, task->recv.ptr,
expect_size)) == 0)
{
SF_PROTO_SET_MAGIC(((SFCommonProtoHeader *)
task->recv.ptr->data)->magic);
}
return result;
}
static inline int sf_set_task_send_max_buffer_size(
struct fast_task_info *task)
{
int result;
if ((result=free_queue_set_max_buffer_size(task, task->send.ptr)) == 0) {
SF_PROTO_SET_MAGIC(((SFCommonProtoHeader *)
task->send.ptr->data)->magic);
}
return result;
}
static inline int sf_set_task_recv_max_buffer_size(
struct fast_task_info *task)
{
int result;
if ((result=free_queue_set_max_buffer_size(task, task->recv.ptr)) == 0) {
SF_PROTO_SET_MAGIC(((SFCommonProtoHeader *)
task->recv.ptr->data)->magic);
}
return result;
}
static inline int sf_realloc_task_send_buffer(
struct fast_task_info *task, const int expect_size)
{
int result;
if ((result=free_queue_realloc_buffer(task, task->send.ptr,
expect_size)) == 0)
{
SF_PROTO_SET_MAGIC(((SFCommonProtoHeader *)
task->send.ptr->data)->magic);
}
return result;
}
static inline int sf_realloc_task_recv_buffer(
struct fast_task_info *task, const int expect_size)
{
int result;
if ((result=free_queue_realloc_buffer(task, task->recv.ptr,
expect_size)) == 0)
{
SF_PROTO_SET_MAGIC(((SFCommonProtoHeader *)
task->recv.ptr->data)->magic);
}
return result;
}
static inline int sf_realloc_task_send_max_buffer(
struct fast_task_info *task)
{
int result;
if ((result=free_queue_realloc_max_buffer(task, task->send.ptr)) == 0) {
SF_PROTO_SET_MAGIC(((SFCommonProtoHeader *)
task->send.ptr->data)->magic);
}
return result;
}
static inline int sf_realloc_task_recv_max_buffer(
struct fast_task_info *task)
{
int result;
if ((result=free_queue_realloc_max_buffer(task, task->recv.ptr)) == 0) {
SF_PROTO_SET_MAGIC(((SFCommonProtoHeader *)
task->recv.ptr->data)->magic);
}
return result;
}
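/*
 * Example (not part of the diff): a sketch of growing a task's receive
 * buffer before reading a large request body; body_len is assumed to come
 * from an already parsed SFCommonProtoHeader.
 */
static int ensure_recv_capacity(struct fast_task_info *task, const int body_len)
{
    /* on success the helper re-initializes the protocol magic bytes in the
     * (possibly re-allocated) receive buffer */
    return sf_set_task_recv_buffer_size(task,
            sizeof(SFCommonProtoHeader) + body_len);
}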
static inline void sf_log_network_error_ex1(SFResponseInfo *response, static inline void sf_log_network_error_ex1(SFResponseInfo *response,
const ConnectionInfo *conn, const int result, const ConnectionInfo *conn, const char *service_name,
const int log_level, const char *file, const int line) const int result, const int log_level,
const char *file, const int line)
{ {
if (response->error.length > 0) { if (response->error.length > 0) {
log_it_ex(&g_log_context, log_level, log_it_ex(&g_log_context, log_level, "file: %s, line: %d, "
"file: %s, line: %d, " "%s%sserver %s:%u response message: %s", file, line,
"server %s:%u response message: %s", (service_name != NULL ? service_name : ""),
file, line, conn->ip_addr, conn->port, (service_name != NULL ? " ": ""),
conn->ip_addr, conn->port,
response->error.message); response->error.message);
} else { } else {
log_it_ex(&g_log_context, log_level, log_it_ex(&g_log_context, log_level, "file: %s, line: %d, "
"file: %s, line: %d, " "communicate with %s%sserver %s:%u fail, "
"communicate with server %s:%u fail, "
"errno: %d, error info: %s", file, line, "errno: %d, error info: %s", file, line,
(service_name != NULL ? service_name : ""),
(service_name != NULL ? " ": ""),
conn->ip_addr, conn->port, conn->ip_addr, conn->port,
result, STRERROR(result)); result, STRERROR(result));
} }
} }
#define sf_log_network_error_ex(response, conn, result, log_level) \ #define sf_log_network_error_ex(response, conn, \
sf_log_network_error_ex1(response, conn, result, \ service_name, result, log_level) \
log_level, __FILE__, __LINE__) sf_log_network_error_ex1(response, conn, service_name, \
result, log_level, __FILE__, __LINE__)
#define sf_log_network_error(response, conn, result) \ #define sf_log_network_error(response, conn, service_name, result) \
sf_log_network_error_ex1(response, conn, result, \ sf_log_network_error_ex1(response, conn, service_name, result, \
LOG_ERR, __FILE__, __LINE__) LOG_ERR, __FILE__, __LINE__)
#define sf_log_network_error_for_update(response, conn, result) \ #define sf_log_network_error_for_update_ex(response, conn, \
sf_log_network_error_ex(response, conn, result, \ service_name, result, enoent_log_level, file, line) \
(result == SF_RETRIABLE_ERROR_CHANNEL_INVALID) ? \ sf_log_network_error_ex1(response, conn, service_name, result, \
LOG_DEBUG : LOG_ERR)
#define sf_log_network_error_for_delete(response, \
conn, result, enoent_log_level) \
sf_log_network_error_ex(response, conn, result, \
(result == SF_RETRIABLE_ERROR_CHANNEL_INVALID) ? \ (result == SF_RETRIABLE_ERROR_CHANNEL_INVALID) ? \
LOG_DEBUG : ((result == ENOENT || result == ENODATA) ? \ LOG_DEBUG : ((result == ENOENT || result == ENODATA) ? \
enoent_log_level : LOG_ERR)) enoent_log_level : LOG_ERR), file, line)
#define sf_log_network_error_for_update(response, conn, service_name, result) \
sf_log_network_error_for_update_ex(response, conn, service_name, \
result, LOG_ERR, __FILE__, __LINE__)
#define sf_log_network_error_for_delete_ex(response, conn, \
service_name, result, enoent_log_level, file, line) \
sf_log_network_error_ex1(response, conn, service_name, result, \
(result == SF_RETRIABLE_ERROR_CHANNEL_INVALID) ? \
LOG_DEBUG : ((result == ENOENT || result == ENODATA) ? \
enoent_log_level : LOG_ERR), file, line)
#define sf_log_network_error_for_delete(response, \
conn, service_name, result, enoent_log_level) \
sf_log_network_error_for_delete_ex(response, conn, service_name, \
result, enoent_log_level, __FILE__, __LINE__)
static inline int sf_server_expect_body_length(SFResponseInfo *response, static inline int sf_server_expect_body_length(SFResponseInfo *response,
const int body_length, const int expect_body_len) const int body_length, const int expect_body_len)
@ -387,6 +562,16 @@ int sf_recv_response(ConnectionInfo *conn, SFResponseInfo *response,
const int network_timeout, const unsigned char expect_cmd, const int network_timeout, const unsigned char expect_cmd,
char *recv_data, const int expect_body_len); char *recv_data, const int expect_body_len);
static inline int sf_recv_none_body_response(ConnectionInfo *conn,
SFResponseInfo *response, const int network_timeout,
const unsigned char expect_cmd)
{
char *recv_data = NULL;
const int expect_body_len = 0;
return sf_recv_response(conn, response, network_timeout,
expect_cmd, recv_data, expect_body_len);
}
int sf_recv_vary_response(ConnectionInfo *conn, SFResponseInfo *response, int sf_recv_vary_response(ConnectionInfo *conn, SFResponseInfo *response,
const int network_timeout, const unsigned char expect_cmd, const int network_timeout, const unsigned char expect_cmd,
SFProtoRecvBuffer *buffer, const int min_body_len); SFProtoRecvBuffer *buffer, const int min_body_len);
@ -421,6 +606,56 @@ static inline void sf_free_recv_buffer(SFProtoRecvBuffer *buffer)
} }
} }
static inline int sf_proto_send_buf1(ConnectionInfo *conn, char *data,
const int len, SFResponseInfo *response, const int network_timeout)
{
int result;
if (conn->comm_type == fc_comm_type_rdma) {
result = G_RDMA_CONNECTION_CALLBACKS.request_by_buf1(
conn, data, len, network_timeout * 1000);
} else {
result = tcpsenddata_nb(conn->sock, data, len, network_timeout);
}
if (result != 0) {
response->error.length = snprintf(response->error.message,
sizeof(response->error.message),
"send data fail, errno: %d, error info: %s",
result, STRERROR(result));
}
return result;
}
static inline int sf_proto_send_buf2(ConnectionInfo *conn, char *buff1,
const int length1, char *buff2, const int length2,
SFResponseInfo *response, const int network_timeout)
{
int result;
if (conn->comm_type == fc_comm_type_rdma) {
result = G_RDMA_CONNECTION_CALLBACKS.request_by_buf2(
conn, buff1, length1, buff2, length2,
network_timeout * 1000);
} else {
if ((result=tcpsenddata_nb(conn->sock, buff1, length1,
network_timeout)) == 0)
{
result = tcpsenddata_nb(conn->sock, buff2, length2,
network_timeout);
}
}
if (result != 0) {
response->error.length = snprintf(response->error.message,
sizeof(response->error.message),
"send data fail, errno: %d, error info: %s",
result, STRERROR(result));
}
return result;
}
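/*
 * Example (not part of the diff): a sketch of a header-only request/response
 * round trip built from the helpers above. SF_EXAMPLE_CMD_REQ/RESP are
 * placeholder command codes, not constants defined by this header.
 */
#define SF_EXAMPLE_CMD_REQ   151   /* placeholder */
#define SF_EXAMPLE_CMD_RESP  152   /* placeholder */

static int send_simple_request(ConnectionInfo *conn,
        SFResponseInfo *response, const int network_timeout)
{
    char out_buff[sizeof(SFCommonProtoHeader)];
    SFCommonProtoHeader *header;
    int result;

    header = (SFCommonProtoHeader *)out_buff;
    SF_PROTO_SET_HEADER(header, SF_EXAMPLE_CMD_REQ, 0);
    if ((result=sf_proto_send_buf1(conn, out_buff, sizeof(out_buff),
                    response, network_timeout)) != 0)
    {
        return result;
    }
    /* expect an empty-body response carrying the placeholder response cmd */
    return sf_recv_none_body_response(conn, response,
            network_timeout, SF_EXAMPLE_CMD_RESP);
}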
int sf_send_and_recv_response_header(ConnectionInfo *conn, char *data, int sf_send_and_recv_response_header(ConnectionInfo *conn, char *data,
const int len, SFResponseInfo *response, const int network_timeout); const int len, SFResponseInfo *response, const int network_timeout);
@ -481,16 +716,27 @@ int sf_send_and_recv_vary_response(ConnectionInfo *conn,
const int network_timeout, const unsigned char expect_cmd, const int network_timeout, const unsigned char expect_cmd,
SFProtoRecvBuffer *buffer, const int min_body_len); SFProtoRecvBuffer *buffer, const int min_body_len);
static inline void sf_proto_extract_header(const SFCommonProtoHeader static inline int sf_proto_parse_header(const SFCommonProtoHeader
*header_proto, SFHeaderInfo *header_info) *header_proto, SFResponseInfo *response)
{ {
header_info->cmd = header_proto->cmd; if (!SF_PROTO_CHECK_MAGIC(header_proto->magic)) {
header_info->body_len = buff2int(header_proto->body_len); response->error.length = snprintf(response->error.message,
header_info->flags = buff2short(header_proto->flags); sizeof(response->error.message),
header_info->status = buff2short(header_proto->status); "magic "SF_PROTO_MAGIC_FORMAT" is invalid, "
if (header_info->status > 255) { "expect: "SF_PROTO_MAGIC_FORMAT,
header_info->status = sf_localize_errno(header_info->status); SF_PROTO_MAGIC_PARAMS(header_proto->magic),
SF_PROTO_MAGIC_EXPECT_PARAMS);
return EINVAL;
} }
response->header.cmd = header_proto->cmd;
response->header.body_len = buff2int(header_proto->body_len);
response->header.flags = buff2short(header_proto->flags);
response->header.status = buff2short(header_proto->status);
if (response->header.status > 255) {
response->header.status = sf_localize_errno(response->header.status);
}
return 0;
} }
static inline void sf_proto_pack_limit(const SFListLimitInfo static inline void sf_proto_pack_limit(const SFListLimitInfo
@ -529,16 +775,33 @@ int sf_proto_deal_ack(struct fast_task_info *task,
        SFRequestInfo *request, SFResponseInfo *response);
 int sf_proto_rebind_idempotency_channel(ConnectionInfo *conn,
-        const uint32_t channel_id, const int key, const int network_timeout);
+        const char *service_name, const uint32_t channel_id,
+        const int key, const int network_timeout);

 int sf_proto_get_group_servers(ConnectionInfo *conn,
-        const int network_timeout, const int group_id,
-        SFGroupServerArray *sarray);
+        const char *service_name, const int network_timeout,
+        const int group_id, SFGroupServerArray *sarray);

-int sf_proto_get_leader(ConnectionInfo *conn,
-        const int network_timeout,
-        SFClientServerEntry *leader);
+int sf_proto_get_leader(ConnectionInfo *conn, const char *service_name,
+        const int network_timeout, SFClientServerEntry *leader);
static inline void sf_proto_get_server_status_pack(
const SFGetServerStatusRequest *r,
SFProtoGetServerStatusReq *req)
{
int2buff(r->server_id, req->server_id);
req->is_leader = (r->is_leader ? 1 : 0);
req->auth_enabled = (r->auth_enabled ? 1 : 0);
memcpy(req->config_signs.servers, r->servers_sign,
SF_CLUSTER_CONFIG_SIGN_LEN);
if (r->cluster_sign != NULL) {
memcpy(req->config_signs.cluster, r->cluster_sign,
SF_CLUSTER_CONFIG_SIGN_LEN);
} else {
memset(req->config_signs.cluster, 0,
SF_CLUSTER_CONFIG_SIGN_LEN);
}
}
#define SF_CLIENT_RELEASE_CONNECTION(cm, conn, result) \
    do { \


@ -21,6 +21,7 @@
#include "fastcommon/common_define.h"
#include "fastcommon/shared_func.h"
#include "fastcommon/fast_buffer.h"
#include "fastcommon/uniq_skiplist.h"
#include "fastcommon/hash.h"

#define SF_SERIALIZER_VALUE_TYPE_COUNT 12
@ -438,6 +439,44 @@ static inline int sf_serializer_pack_id_name_array(FastBuffer *buffer,
    return 0;
}
static inline int sf_serializer_pack_id_name_skiplist(
FastBuffer *buffer, const unsigned char fid,
UniqSkiplist *sl)
{
int result;
int length;
SFSerializerPackFieldArray *obj;
const id_name_pair_t *pair;
UniqSkiplistIterator it;
char *p;
length = sizeof(SFSerializerPackFieldArray);
uniq_skiplist_iterator(sl, &it);
while ((pair=uniq_skiplist_next(&it)) != NULL) {
length += sizeof(int64_t) + pair->name.len +
sizeof(SFSerializerPackStringValue);
}
if ((result=fast_buffer_check_inc_size(buffer, length)) != 0) {
return result;
}
obj = (SFSerializerPackFieldArray *)(buffer->data + buffer->length);
obj->field.id = fid;
obj->field.type = sf_serializer_value_type_id_name_array;
int2buff(uniq_skiplist_count(sl), obj->value.count);
p = obj->value.ptr;
uniq_skiplist_iterator(sl, &it);
while ((pair=uniq_skiplist_next(&it)) != NULL) {
long2buff(pair->id, p);
p += sizeof(int64_t);
SF_SERIALIZER_PACK_STRING_AND_MOVE_PTR(p, &pair->name);
}
buffer->length += length;
return 0;
}
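The skiplist variant walks the list twice, once to size the buffer and once to emit the entries, but it writes the same wire format as the array packer. A tiny hypothetical wrapper (field id chosen arbitrarily) is all a caller needs:

static inline int demo_pack_name_index(FastBuffer *buffer, UniqSkiplist *sl)
{
    const unsigned char fid = 5;   /* illustrative field id */
    return sf_serializer_pack_id_name_skiplist(buffer, fid, sl);
}

Because the field type is still sf_serializer_value_type_id_name_array, the unpack side does not need to know whether the producer held the pairs in an array or a skiplist.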
static inline int sf_serializer_pack_map(FastBuffer *buffer,
        const unsigned char fid, const key_value_pair_t *kv_pairs,
        const int count)


@ -25,6 +25,7 @@
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <ifaddrs.h>
#include "fastcommon/logger.h"
#include "fastcommon/sockopt.h"
#include "fastcommon/shared_func.h"
@ -32,9 +33,8 @@
 #include "fastcommon/sched_thread.h"
 #include "fastcommon/ioevent_loop.h"
 #include "fastcommon/fc_memory.h"
-#include "sf_nio.h"
+#include "sf_proto.h"
 #include "sf_util.h"
-#include "sf_global.h"
 #include "sf_service.h"

 #if defined(OS_LINUX)
@ -57,62 +57,46 @@ struct worker_thread_context {
    struct nio_thread_data *thread_data;
};
struct accept_thread_context {
SFContext *sf_context;
int server_sock;
};
int sf_init_task(struct fast_task_info *task)
{
task->connect_timeout = SF_G_CONNECT_TIMEOUT; //for client side
task->network_timeout = SF_G_NETWORK_TIMEOUT;
return 0;
}
static void *worker_thread_entrance(void *arg);
static int sf_init_free_queues(const int task_arg_size, static int sf_init_free_queue(SFContext *sf_context, const char *name,
TaskInitCallback init_callback) const bool double_buffers, const bool need_shrink_task_buffer,
const int task_padding_size, const int task_arg_size,
TaskInitCallback init_callback, void *init_arg)
{ {
#define ALLOC_CONNECTIONS_ONCE 1024
static bool sf_inited = false;
int result; int result;
int buffer_size;
int m; int m;
int init_connections; int max_m;
int alloc_conn_once; int alloc_conn_once;
if (sf_inited) {
return 0;
}
sf_inited = true;
    if ((result=set_rand_seed()) != 0) {
        logCrit("file: "__FILE__", line: %d, "
                "set_rand_seed fail, program exit!", __LINE__);
        return result;
    }
m = g_sf_global_vars.min_buff_size / (64 * 1024); if (strcmp(name, "cluster") == 0 || strcmp(name, "replica") == 0) {
buffer_size = FC_MAX(4 * 1024 * 1024, sf_context->
net_buffer_cfg.max_buff_size);
max_m = 64;
} else {
buffer_size = sf_context->net_buffer_cfg.min_buff_size;
max_m = 16;
}
m = buffer_size / (64 * 1024);
if (m == 0) { if (m == 0) {
m = 1; m = 1;
} else if (m > 16) { } else if (m > max_m) {
m = 16; m = max_m;
}
alloc_conn_once = ALLOC_CONNECTIONS_ONCE / m;
init_connections = g_sf_global_vars.max_connections < alloc_conn_once ?
g_sf_global_vars.max_connections : alloc_conn_once;
if ((result=free_queue_init_ex2(g_sf_global_vars.max_connections,
init_connections, alloc_conn_once, g_sf_global_vars.
min_buff_size, g_sf_global_vars.max_buff_size,
task_arg_size, init_callback != NULL ?
init_callback : sf_init_task)) != 0)
{
return result;
} }
alloc_conn_once = 256 / m;
return 0; return free_queue_init_ex2(&sf_context->free_queue, name, double_buffers,
need_shrink_task_buffer, sf_context->net_buffer_cfg.max_connections,
alloc_conn_once, sf_context->net_buffer_cfg.min_buff_size,
sf_context->net_buffer_cfg.max_buff_size, task_padding_size,
task_arg_size, init_callback, init_arg);
} }
int sf_service_init_ex2(SFContext *sf_context, const char *name, int sf_service_init_ex2(SFContext *sf_context, const char *name,
@ -121,14 +105,21 @@ int sf_service_init_ex2(SFContext *sf_context, const char *name,
ThreadLoopCallback thread_loop_callback, ThreadLoopCallback thread_loop_callback,
sf_accept_done_callback accept_done_callback, sf_accept_done_callback accept_done_callback,
sf_set_body_length_callback set_body_length_func, sf_set_body_length_callback set_body_length_func,
sf_deal_task_func deal_func, TaskCleanUpCallback task_cleanup_func, sf_alloc_recv_buffer_callback alloc_recv_buffer_func,
sf_send_done_callback send_done_callback,
sf_deal_task_callback deal_func, TaskCleanUpCallback task_cleanup_func,
sf_recv_timeout_callback timeout_callback, const int net_timeout_ms, sf_recv_timeout_callback timeout_callback, const int net_timeout_ms,
const int proto_header_size, const int task_arg_size, const int proto_header_size, const int task_padding_size,
TaskInitCallback init_callback, sf_release_buffer_callback const int task_arg_size, const bool double_buffers,
release_buffer_callback) const bool need_shrink_task_buffer, const bool explicit_post_recv,
TaskInitCallback init_callback, void *init_arg,
sf_release_buffer_callback release_buffer_callback)
{ {
int result; int result;
int bytes; int bytes;
int extra_events;
int max_entries;
int i;
struct worker_thread_context *thread_contexts; struct worker_thread_context *thread_contexts;
struct worker_thread_context *thread_ctx; struct worker_thread_context *thread_ctx;
struct nio_thread_data *thread_data; struct nio_thread_data *thread_data;
@ -136,15 +127,26 @@ int sf_service_init_ex2(SFContext *sf_context, const char *name,
pthread_t tid; pthread_t tid;
pthread_attr_t thread_attr; pthread_attr_t thread_attr;
snprintf(sf_context->name, sizeof(sf_context->name), "%s", name); fc_safe_strcpy(sf_context->name, name);
sf_context->realloc_task_buffer = g_sf_global_vars. sf_context->connect_need_log = true;
min_buff_size < g_sf_global_vars.max_buff_size; sf_context->realloc_task_buffer = sf_context->net_buffer_cfg.
sf_context->accept_done_func = accept_done_callback; min_buff_size < sf_context->net_buffer_cfg.max_buff_size;
sf_context->callbacks.accept_done = accept_done_callback;
sf_set_parameters_ex(sf_context, proto_header_size, sf_set_parameters_ex(sf_context, proto_header_size,
set_body_length_func, deal_func, task_cleanup_func, set_body_length_func, alloc_recv_buffer_func,
send_done_callback, deal_func, task_cleanup_func,
timeout_callback, release_buffer_callback); timeout_callback, release_buffer_callback);
if (explicit_post_recv) {
for (i=0; i<SF_ADDRESS_FAMILY_COUNT; i++) {
sf_context->handlers[i].handlers[SF_RDMACM_NETWORK_HANDLER_INDEX].
explicit_post_recv = true;
}
}
if ((result=sf_init_free_queues(task_arg_size, init_callback)) != 0) { if ((result=sf_init_free_queue(sf_context, name, double_buffers,
need_shrink_task_buffer, task_padding_size,
task_arg_size, init_callback, init_arg)) != 0)
{
return result; return result;
} }
@ -169,11 +171,71 @@ int sf_service_init_ex2(SFContext *sf_context, const char *name,
return ENOMEM; return ENOMEM;
} }
if (SF_G_EPOLL_EDGE_TRIGGER) {
#ifdef OS_LINUX
#if IOEVENT_USE_EPOLL
extra_events = EPOLLET;
#else
extra_events = 0;
#endif
#elif defined(OS_FREEBSD)
extra_events = EV_CLEAR;
#else
extra_events = 0;
#endif
} else {
extra_events = 0;
}
max_entries = (sf_context->net_buffer_cfg.max_connections +
sf_context->work_threads - 1) / sf_context->work_threads;
if (strcmp(sf_context->name, "cluster") == 0 ||
strcmp(sf_context->name, "replica") == 0)
{
if (max_entries < 1024) {
max_entries += 8;
} else {
max_entries = 1024;
}
} else {
if (max_entries < 4 * 1024) {
max_entries = max_entries * 2;
} else if (max_entries < 8 * 1024) {
max_entries = (max_entries * 3) / 2;
} else if (max_entries < 16 * 1024) {
max_entries = (max_entries * 5) / 4;
} else if (max_entries < 32 * 1024) {
max_entries = (max_entries * 6) / 5;
} else if (max_entries < 64 * 1024) {
max_entries = (max_entries * 11) / 10;
} else if (max_entries < 128 * 1024) {
max_entries = (max_entries * 21) / 20;
}
#if IOEVENT_USE_URING
if (sf_context->use_io_uring) {
if (max_entries > 32 * 1024) {
max_entries = 32 * 1024;
}
}
#endif
}
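    /*
     * worked example with illustrative numbers (not from this change):
     * max_connections = 10240 and work_threads = 4 give
     * max_entries = (10240 + 3) / 4 = 2560 per thread; a "cluster" or
     * "replica" context clamps that to 1024, while a service context
     * doubles it (2560 < 4 * 1024) to 5120 before it is passed to
     * ioevent_init() as the per-thread entry count.
     */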
g_current_time = time(NULL);
sf_context->thread_count = 0; sf_context->thread_count = 0;
data_end = sf_context->thread_data + sf_context->work_threads; data_end = sf_context->thread_data + sf_context->work_threads;
for (thread_data=sf_context->thread_data,thread_ctx=thread_contexts; for (thread_data=sf_context->thread_data,thread_ctx=thread_contexts;
thread_data<data_end; thread_data++,thread_ctx++) thread_data<data_end; thread_data++,thread_ctx++)
{ {
thread_data->timeout_ms = net_timeout_ms;
FC_INIT_LIST_HEAD(&thread_data->polling_queue);
if (sf_context->smart_polling.enabled) {
thread_data->busy_polling_callback =
sf_rdma_busy_polling_callback;
} else {
thread_data->busy_polling_callback = NULL;
}
thread_data->thread_loop_callback = thread_loop_callback; thread_data->thread_loop_callback = thread_loop_callback;
if (alloc_thread_extra_data_callback != NULL) { if (alloc_thread_extra_data_callback != NULL) {
thread_data->arg = alloc_thread_extra_data_callback( thread_data->arg = alloc_thread_extra_data_callback(
@ -183,32 +245,60 @@ int sf_service_init_ex2(SFContext *sf_context, const char *name,
thread_data->arg = NULL; thread_data->arg = NULL;
} }
if (ioevent_init(&thread_data->ev_puller, if ((result=ioevent_init(&thread_data->ev_puller, sf_context->
g_sf_global_vars.max_connections + 2, net_timeout_ms, 0) != 0) name, sf_context->use_io_uring, max_entries,
net_timeout_ms, extra_events)) != 0)
{ {
result = errno != 0 ? errno : ENOMEM; char prompt[256];
#if IOEVENT_USE_URING
if (sf_context->use_io_uring) {
if (result == EPERM) {
strcpy(prompt, " make sure kernel."
"io_uring_disabled set to 0");
} else if (result == EINVAL) {
sprintf(prompt, " maybe max_connections: %d is too large"
" or [%s]'s work_threads: %d is too small",
sf_context->net_buffer_cfg.max_connections,
sf_context->name, sf_context->work_threads);
} else {
*prompt = '\0';
}
} else {
#endif
*prompt = '\0';
#if IOEVENT_USE_URING
}
#endif
logError("file: "__FILE__", line: %d, " logError("file: "__FILE__", line: %d, "
"ioevent_init fail, " "ioevent_init fail, errno: %d, error info: %s.%s"
"errno: %d, error info: %s", , __LINE__, result, strerror(result), prompt);
__LINE__, result, strerror(result));
return result; return result;
} }
result = fast_timer_init(&thread_data->timer, #if IOEVENT_USE_URING
2 * g_sf_global_vars.network_timeout, g_current_time); if (sf_context->use_io_uring && send_done_callback != NULL) {
ioevent_set_send_zc_done_notify(&thread_data->ev_puller, true);
}
#endif
result = fast_timer_init(&thread_data->timer, 2 * sf_context->
net_buffer_cfg.network_timeout, g_current_time);
if (result != 0) { if (result != 0) {
logError("file: "__FILE__", line: %d, " logError("file: "__FILE__", line: %d, "
"fast_timer_init fail, " "fast_timer_init fail, errno: %d, error info: %s",
"errno: %d, error info: %s",
__LINE__, result, strerror(result)); __LINE__, result, strerror(result));
return result; return result;
} }
if ((result=init_pthread_lock(&thread_data->waiting_queue.lock)) != 0) { if ((result=init_pthread_lock(&thread_data->
waiting_queue.lock)) != 0)
{
return result; return result;
} }
#if defined(OS_LINUX) #if defined(OS_LINUX)
FC_NOTIFY_READ_FD(thread_data) = eventfd(0, EFD_NONBLOCK); FC_NOTIFY_READ_FD(thread_data) = eventfd(0,
EFD_NONBLOCK | EFD_CLOEXEC);
if (FC_NOTIFY_READ_FD(thread_data) < 0) { if (FC_NOTIFY_READ_FD(thread_data) < 0) {
result = errno != 0 ? errno : EPERM; result = errno != 0 ? errno : EPERM;
logError("file: "__FILE__", line: %d, " logError("file: "__FILE__", line: %d, "
@ -232,6 +322,8 @@ int sf_service_init_ex2(SFContext *sf_context, const char *name,
{ {
break; break;
} }
FC_SET_CLOEXEC(FC_NOTIFY_READ_FD(thread_data));
FC_SET_CLOEXEC(FC_NOTIFY_WRITE_FD(thread_data));
#endif #endif
thread_ctx->sf_context = sf_context; thread_ctx->sf_context = sf_context;
@ -256,7 +348,7 @@ int sf_service_destroy_ex(SFContext *sf_context)
{ {
struct nio_thread_data *data_end, *thread_data; struct nio_thread_data *data_end, *thread_data;
free_queue_destroy(); free_queue_destroy(&sf_context->free_queue);
data_end = sf_context->thread_data + sf_context->work_threads; data_end = sf_context->thread_data + sf_context->work_threads;
for (thread_data=sf_context->thread_data; thread_data<data_end; for (thread_data=sf_context->thread_data; thread_data<data_end;
thread_data++) thread_data++)
@ -309,7 +401,7 @@ static void *worker_thread_entrance(void *arg)
ioevent_loop(thread_ctx->thread_data, ioevent_loop(thread_ctx->thread_data,
sf_recv_notify_read, sf_recv_notify_read,
thread_ctx->sf_context->task_cleanup_func, thread_ctx->sf_context->callbacks.task_cleanup,
&g_sf_global_vars.continue_flag); &g_sf_global_vars.continue_flag);
ioevent_destroy(&thread_ctx->thread_data->ev_puller); ioevent_destroy(&thread_ctx->thread_data->ev_puller);
@ -324,15 +416,20 @@ static void *worker_thread_entrance(void *arg)
return NULL; return NULL;
} }
static int _socket_server(const char *bind_addr, int port, int *sock) int sf_socket_create_server(SFListener *listener,
int af, const char *bind_addr)
{ {
int result; int result;
*sock = socketServer(bind_addr, port, &result);
if (*sock < 0) { listener->sock = socketServer2(af, bind_addr,
listener->port, &result);
if (listener->sock < 0) {
return result; return result;
} }
if ((result=tcpsetserveropt(*sock, g_sf_global_vars.network_timeout)) != 0) { if ((result=tcpsetserveropt(listener->sock, listener->handler->
fh->ctx->net_buffer_cfg.network_timeout)) != 0)
{
return result; return result;
} }
@ -342,55 +439,113 @@ static int _socket_server(const char *bind_addr, int port, int *sock)
int sf_socket_server_ex(SFContext *sf_context) int sf_socket_server_ex(SFContext *sf_context)
{ {
int result; int result;
int i;
bool dual_ports;
const char *bind_addr; const char *bind_addr;
SFAddressFamilyHandler *fh;
SFNetworkHandler *handler;
SFNetworkHandler *end;
sf_context->inner_sock = sf_context->outer_sock = -1; for (i=0; i<SF_ADDRESS_FAMILY_COUNT; i++) {
if (sf_context->outer_port == sf_context->inner_port) { fh = sf_context->handlers + i;
if (*sf_context->outer_bind_addr == '\0' || if (fh->af == AF_UNSPEC) {
*sf_context->inner_bind_addr == '\0') { continue;
}
end = fh->handlers + SF_NETWORK_HANDLER_COUNT;
for (handler=fh->handlers; handler<end; handler++) {
if (!handler->enabled) {
continue;
}
handler->inner.enabled = false;
handler->outer.enabled = false;
if (handler->outer.port == handler->inner.port) {
if (*fh->outer_bind_addr == '\0' ||
*fh->inner_bind_addr == '\0')
{
bind_addr = ""; bind_addr = "";
return _socket_server(bind_addr, sf_context->outer_port, if ((result=handler->create_server(&handler->
&sf_context->outer_sock); outer, fh->af, bind_addr)) != 0)
} else if (strcmp(sf_context->outer_bind_addr, {
sf_context->inner_bind_addr) == 0) { return result;
bind_addr = sf_context->outer_bind_addr; }
handler->outer.enabled = true;
dual_ports = false;
} else if (strcmp(fh->outer_bind_addr,
fh->inner_bind_addr) == 0)
{
bind_addr = fh->outer_bind_addr;
if (is_private_ip(bind_addr)) { if (is_private_ip(bind_addr)) {
return _socket_server(bind_addr, sf_context-> if ((result=handler->create_server(&handler->
inner_port, &sf_context->inner_sock); inner, fh->af, bind_addr)) != 0)
{
return result;
}
handler->inner.enabled = true;
} else { } else {
return _socket_server(bind_addr, sf_context-> if ((result=handler->create_server(&handler->
outer_port, &sf_context->outer_sock); outer, fh->af, bind_addr)) != 0)
{
return result;
} }
handler->outer.enabled = true;
} }
dual_ports = false;
} else {
dual_ports = true;
}
} else {
dual_ports = true;
} }
if ((result=_socket_server(sf_context->outer_bind_addr, if (dual_ports) {
sf_context->outer_port, &sf_context->outer_sock)) != 0) if ((result=handler->create_server(&handler->outer,
fh->af, fh->outer_bind_addr)) != 0)
{ {
return result; return result;
} }
if ((result=_socket_server(sf_context->inner_bind_addr, if ((result=handler->create_server(&handler->inner,
sf_context->inner_port, &sf_context->inner_sock)) != 0) fh->af, fh->inner_bind_addr)) != 0)
{ {
return result; return result;
} }
handler->inner.enabled = true;
handler->outer.enabled = true;
}
/*
logInfo("%p [%d] inner {port: %d, enabled: %d}, "
"outer {port: %d, enabled: %d}", sf_context,
(int)(handler-sf_context->handlers),
handler->inner.port, handler->inner.enabled,
handler->outer.port, handler->outer.enabled);
*/
}
}
return 0; return 0;
} }
static void accept_run(struct accept_thread_context *accept_context) void sf_socket_close_server(SFListener *listener)
{
if (listener->sock >= 0) {
close(listener->sock);
listener->sock = -1;
}
}
struct fast_task_info *sf_socket_accept_connection(SFListener *listener)
{ {
int incomesock; int incomesock;
int port; int port;
struct sockaddr_in inaddr;
socklen_t sockaddr_len; socklen_t sockaddr_len;
struct fast_task_info *task; struct fast_task_info *task;
while (g_sf_global_vars.continue_flag) { sockaddr_len = sizeof(listener->inaddr);
sockaddr_len = sizeof(inaddr); incomesock = accept(listener->sock, (struct sockaddr *)
incomesock = accept(accept_context->server_sock, &listener->inaddr, &sockaddr_len);
(struct sockaddr*)&inaddr, &sockaddr_len);
if (incomesock < 0) { //error if (incomesock < 0) { //error
if (!(errno == EINTR || errno == EAGAIN)) { if (!(errno == EINTR || errno == EAGAIN)) {
logError("file: "__FILE__", line: %d, " logError("file: "__FILE__", line: %d, "
@ -398,57 +553,101 @@ static void accept_run(struct accept_thread_context *accept_context)
__LINE__, errno, strerror(errno)); __LINE__, errno, strerror(errno));
} }
continue; return NULL;
} }
if (tcpsetnonblockopt(incomesock) != 0) { if (tcpsetnonblockopt(incomesock) != 0) {
close(incomesock); close(incomesock);
continue; return NULL;
} }
FC_SET_CLOEXEC(incomesock);
if ((task=sf_alloc_init_task(accept_context-> if ((task=sf_alloc_init_server_task(listener->handler,
sf_context, incomesock)) == NULL) incomesock)) == NULL)
{ {
close(incomesock); close(incomesock);
continue; return NULL;
} }
getPeerIpAddPort(incomesock, task->client_ip, getPeerIpAddPort(incomesock, task->client_ip,
sizeof(task->client_ip), &port); sizeof(task->client_ip), &port);
task->port = port; task->port = port;
task->thread_data = accept_context->sf_context->thread_data + return task;
incomesock % accept_context->sf_context->work_threads; }
if (accept_context->sf_context->accept_done_func != NULL) {
accept_context->sf_context->accept_done_func(task, void sf_socket_close_ex(SFContext *sf_context)
accept_context->server_sock == {
accept_context->sf_context->inner_sock); int i;
SFNetworkHandler *handler;
SFNetworkHandler *end;
for (i=0; i<SF_ADDRESS_FAMILY_COUNT; i++) {
if (sf_context->handlers[i].af == AF_UNSPEC) {
continue;
}
end = sf_context->handlers[i].handlers + SF_NETWORK_HANDLER_COUNT;
for (handler=sf_context->handlers[i].handlers; handler<end; handler++) {
if (!handler->enabled) {
continue;
}
if (handler->outer.enabled) {
handler->close_server(&handler->outer);
}
if (handler->inner.enabled) {
handler->close_server(&handler->inner);
}
}
}
}
static void accept_run(SFListener *listener)
{
struct fast_task_info *task;
while (g_sf_global_vars.continue_flag) {
if ((task=listener->handler->accept_connection(listener)) == NULL) {
continue;
}
task->thread_data = listener->handler->fh->ctx->thread_data +
task->event.fd % listener->handler->fh->ctx->work_threads;
if (listener->handler->fh->ctx->callbacks.accept_done != NULL) {
if (listener->handler->fh->ctx->callbacks.accept_done(task,
listener->inaddr.sin_addr.s_addr,
listener->is_inner) != 0)
{
listener->handler->close_connection(task);
sf_release_task(task);
continue;
}
} }
if (sf_nio_notify(task, SF_NIO_STAGE_INIT) != 0) { if (sf_nio_notify(task, SF_NIO_STAGE_INIT) != 0) {
close(incomesock); listener->handler->close_connection(task);
sf_release_task(task); sf_release_task(task);
} }
} }
} }
static void *accept_thread_entrance(struct accept_thread_context static void *accept_thread_entrance(SFListener *listener)
*accept_context)
{ {
#ifdef OS_LINUX #ifdef OS_LINUX
{ {
char thread_name[32]; char thread_name[32];
snprintf(thread_name, sizeof(thread_name), "%s-listen", snprintf(thread_name, sizeof(thread_name), "%s-%s-listen",
accept_context->sf_context->name); listener->handler->comm_type == fc_comm_type_sock ?
"sock" : "rdma", listener->handler->fh->ctx->name);
prctl(PR_SET_NAME, thread_name); prctl(PR_SET_NAME, thread_name);
} }
#endif #endif
accept_run(accept_context); accept_run(listener);
return NULL; return NULL;
} }
void _accept_loop(struct accept_thread_context *accept_context, int _accept_loop(SFListener *listener, const int accept_threads)
const int accept_threads)
{ {
pthread_t tid; pthread_t tid;
pthread_attr_t thread_attr; pthread_attr_t thread_attr;
@ -456,7 +655,7 @@ void _accept_loop(struct accept_thread_context *accept_context,
int i; int i;
if (accept_threads <= 0) { if (accept_threads <= 0) {
return; return 0;
} }
if ((result=init_pthread_attr(&thread_attr, g_sf_global_vars. if ((result=init_pthread_attr(&thread_attr, g_sf_global_vars.
@ -464,68 +663,83 @@ void _accept_loop(struct accept_thread_context *accept_context,
{ {
logWarning("file: "__FILE__", line: %d, " logWarning("file: "__FILE__", line: %d, "
"init_pthread_attr fail!", __LINE__); "init_pthread_attr fail!", __LINE__);
return result;
} }
else {
for (i=0; i<accept_threads; i++) { for (i=0; i<accept_threads; i++) {
if ((result=pthread_create(&tid, &thread_attr, if ((result=pthread_create(&tid, &thread_attr,
(void * (*)(void *))accept_thread_entrance, (void * (*)(void *))accept_thread_entrance,
accept_context)) != 0) listener)) != 0)
{ {
logError("file: "__FILE__", line: %d, " logError("file: "__FILE__", line: %d, "
"create thread failed, startup threads: %d, " "create thread failed, startup threads: %d, "
"errno: %d, error info: %s", "errno: %d, error info: %s",
__LINE__, i, result, strerror(result)); __LINE__, i, result, strerror(result));
break; return result;
} }
} }
pthread_attr_destroy(&thread_attr); pthread_attr_destroy(&thread_attr);
} return 0;
} }
void sf_accept_loop_ex(SFContext *sf_context, const bool block) int sf_accept_loop_ex(SFContext *sf_context, const bool blocked)
{ {
struct accept_thread_context *accept_contexts; int i;
int count; SFNetworkHandler *handler;
int bytes; SFNetworkHandler *hend;
SFListener *listeners[SF_ADDRESS_FAMILY_COUNT *
SF_NETWORK_HANDLER_COUNT * 2];
SFListener **listener;
SFListener **last;
SFListener **lend;
if (sf_context->outer_sock >= 0) { listener = listeners;
count = 2; for (i=0; i<SF_ADDRESS_FAMILY_COUNT; i++) {
if (sf_context->handlers[i].af == AF_UNSPEC) {
continue;
}
hend = sf_context->handlers[i].handlers + SF_NETWORK_HANDLER_COUNT;
for (handler=sf_context->handlers[i].handlers;
handler<hend; handler++)
{
if (!handler->enabled) {
continue;
}
if (handler->inner.enabled) {
*listener++ = &handler->inner;
}
if (handler->outer.enabled) {
*listener++ = &handler->outer;
}
}
}
if (listener == listeners) {
logError("file: "__FILE__", line: %d, "
"no listener!", __LINE__);
return ENOENT;
}
last = listener - 1;
if (blocked) {
lend = listener - 1;
} else { } else {
count = 1; lend = listener;
} }
bytes = sizeof(struct accept_thread_context) * count; for (listener=listeners; listener<lend; listener++) {
accept_contexts = (struct accept_thread_context *)fc_malloc(bytes); _accept_loop(*listener, sf_context->accept_threads);
if (accept_contexts == NULL) {
return;
} }
accept_contexts[0].sf_context = sf_context; if (blocked) {
accept_contexts[0].server_sock = sf_context->inner_sock; _accept_loop(*last, sf_context->accept_threads - 1);
accept_run(*last);
if (sf_context->outer_sock >= 0) {
accept_contexts[1].sf_context = sf_context;
accept_contexts[1].server_sock = sf_context->outer_sock;
if (sf_context->inner_sock >= 0) {
_accept_loop(accept_contexts, sf_context->accept_threads);
} }
if (block) { return 0;
_accept_loop(accept_contexts + 1, sf_context->accept_threads - 1);
accept_run(accept_contexts + 1);
} else {
_accept_loop(accept_contexts + 1, sf_context->accept_threads);
}
} else {
if (block) {
_accept_loop(accept_contexts, sf_context->accept_threads - 1);
accept_run(accept_contexts);
} else {
_accept_loop(accept_contexts, sf_context->accept_threads);
}
}
}
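Taken together with the header changes further below, server start-up now looks roughly like this; a rough sketch assuming a fully configured default context, with error handling reduced to pass-through:

static int demo_start_server(void)
{
    int result;

    /* bind and listen on every enabled address family / handler */
    if ((result=sf_socket_server()) != 0) {
        return result;
    }
    /* blocked == true: the calling thread joins the accept loop */
    return sf_accept_loop();
}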
#if defined(DEBUG_FLAG)
@ -639,15 +853,13 @@ int sf_setup_signal_handler()
return 0; return 0;
} }
#define LOG_SCHEDULE_ENTRIES_COUNT 3
int sf_startup_schedule(pthread_t *schedule_tid) int sf_startup_schedule(pthread_t *schedule_tid)
{ {
ScheduleArray scheduleArray; ScheduleArray scheduleArray;
ScheduleEntry scheduleEntries[LOG_SCHEDULE_ENTRIES_COUNT]; ScheduleEntry scheduleEntries[SF_LOG_SCHEDULE_ENTRIES_COUNT];
scheduleArray.entries = scheduleEntries; scheduleArray.entries = scheduleEntries;
sf_setup_schedule(&g_log_context, &g_sf_global_vars.error_log, sf_logger_setup_schedule(&g_log_context, &g_sf_global_vars.error_log,
&scheduleArray); &scheduleArray);
return sched_start(&scheduleArray, schedule_tid, return sched_start(&scheduleArray, schedule_tid,
g_sf_global_vars.thread_stack_size, (bool * volatile) g_sf_global_vars.thread_stack_size, (bool * volatile)
@ -658,7 +870,7 @@ int sf_add_slow_log_schedule(SFSlowLogContext *slowlog_ctx)
{ {
int result; int result;
ScheduleArray scheduleArray; ScheduleArray scheduleArray;
ScheduleEntry scheduleEntries[LOG_SCHEDULE_ENTRIES_COUNT]; ScheduleEntry scheduleEntries[SF_LOG_SCHEDULE_ENTRIES_COUNT];
if (!slowlog_ctx->cfg.enabled) { if (!slowlog_ctx->cfg.enabled) {
return 0; return 0;
@ -671,8 +883,8 @@ int sf_add_slow_log_schedule(SFSlowLogContext *slowlog_ctx)
} }
scheduleArray.entries = scheduleEntries; scheduleArray.entries = scheduleEntries;
sf_setup_schedule(&slowlog_ctx->ctx, &slowlog_ctx->cfg.log_cfg, sf_logger_setup_schedule(&slowlog_ctx->ctx, &slowlog_ctx->
&scheduleArray); cfg.log_cfg, &scheduleArray);
return sched_add_entries(&scheduleArray); return sched_add_entries(&scheduleArray);
} }
@ -683,6 +895,12 @@ void sf_set_current_time()
srand(g_sf_global_vars.up_time); srand(g_sf_global_vars.up_time);
} }
int sf_global_init(const char *log_filename_prefix)
{
sf_set_current_time();
return log_set_prefix(SF_G_BASE_PATH_STR, log_filename_prefix);
}
void sf_enable_thread_notify_ex(SFContext *sf_context, const bool enabled) void sf_enable_thread_notify_ex(SFContext *sf_context, const bool enabled)
{ {
struct nio_thread_data *thread_data; struct nio_thread_data *thread_data;


@ -25,6 +25,9 @@
#include "fastcommon/ioevent.h" #include "fastcommon/ioevent.h"
#include "fastcommon/fast_task_queue.h" #include "fastcommon/fast_task_queue.h"
#include "sf_types.h" #include "sf_types.h"
#include "sf_proto.h"
#include "sf_global.h"
#include "sf_nio.h"
typedef void* (*sf_alloc_thread_extra_data_callback)(const int thread_index); typedef void* (*sf_alloc_thread_extra_data_callback)(const int thread_index);
typedef void (*sf_sig_quit_handler)(int sig); typedef void (*sf_sig_quit_handler)(int sig);
@ -39,29 +42,35 @@ int sf_service_init_ex2(SFContext *sf_context, const char *name,
ThreadLoopCallback thread_loop_callback, ThreadLoopCallback thread_loop_callback,
sf_accept_done_callback accept_done_callback, sf_accept_done_callback accept_done_callback,
sf_set_body_length_callback set_body_length_func, sf_set_body_length_callback set_body_length_func,
sf_deal_task_func deal_func, TaskCleanUpCallback task_cleanup_func, sf_alloc_recv_buffer_callback alloc_recv_buffer_func,
sf_send_done_callback send_done_callback,
sf_deal_task_callback deal_func, TaskCleanUpCallback task_cleanup_func,
sf_recv_timeout_callback timeout_callback, const int net_timeout_ms, sf_recv_timeout_callback timeout_callback, const int net_timeout_ms,
const int proto_header_size, const int task_arg_size, const int proto_header_size, const int task_padding_size,
TaskInitCallback init_callback, sf_release_buffer_callback const int task_arg_size, const bool double_buffers,
release_buffer_callback); const bool need_shrink_task_buffer, const bool explicit_post_recv,
TaskInitCallback init_callback, void *init_arg,
sf_release_buffer_callback release_buffer_callback);
#define sf_service_init_ex(sf_context, name, alloc_thread_extra_data_callback,\ #define sf_service_init_ex(sf_context, name, alloc_thread_extra_data_callback,\
thread_loop_callback, accept_done_callback, set_body_length_func, \ thread_loop_callback, accept_done_callback, set_body_length_func, \
deal_func, task_cleanup_func, timeout_callback, net_timeout_ms, \ send_done_callback, deal_func, task_cleanup_func, timeout_callback, \
proto_header_size, task_arg_size) \ net_timeout_ms, proto_header_size, task_arg_size) \
sf_service_init_ex2(sf_context, name, alloc_thread_extra_data_callback, \ sf_service_init_ex2(sf_context, name, alloc_thread_extra_data_callback, \
thread_loop_callback, accept_done_callback, set_body_length_func, \ thread_loop_callback, accept_done_callback, set_body_length_func, \
deal_func, task_cleanup_func, timeout_callback, net_timeout_ms, \ NULL, send_done_callback, deal_func, task_cleanup_func, \
proto_header_size, task_arg_size, NULL, NULL) timeout_callback, net_timeout_ms, proto_header_size, \
0, task_arg_size, false, true, false, NULL, NULL, NULL)
#define sf_service_init(name, alloc_thread_extra_data_callback, \ #define sf_service_init(name, alloc_thread_extra_data_callback, \
thread_loop_callback, accept_done_callback, set_body_length_func, \ thread_loop_callback, accept_done_callback, set_body_length_func, \
deal_func, task_cleanup_func, timeout_callback, net_timeout_ms, \ send_done_callback, deal_func, task_cleanup_func, timeout_callback, \
proto_header_size, task_arg_size) \ net_timeout_ms, proto_header_size, task_arg_size) \
sf_service_init_ex2(&g_sf_context, name, alloc_thread_extra_data_callback, \ sf_service_init_ex2(&g_sf_context, name, alloc_thread_extra_data_callback, \
thread_loop_callback, accept_done_callback, set_body_length_func, \ thread_loop_callback, accept_done_callback, set_body_length_func, NULL,\
deal_func, task_cleanup_func, timeout_callback, net_timeout_ms, \ send_done_callback, deal_func, task_cleanup_func, timeout_callback, \
proto_header_size, task_arg_size, NULL, NULL) net_timeout_ms, proto_header_size, 0, task_arg_size, false, true, \
false, NULL, NULL, NULL)
int sf_service_destroy_ex(SFContext *sf_context); int sf_service_destroy_ex(SFContext *sf_context);
@ -73,17 +82,43 @@ void sf_service_set_thread_loop_callback_ex(SFContext *sf_context,
#define sf_service_set_thread_loop_callback(thread_loop_callback) \ #define sf_service_set_thread_loop_callback(thread_loop_callback) \
sf_service_set_thread_loop_callback_ex(&g_sf_context, thread_loop_callback) sf_service_set_thread_loop_callback_ex(&g_sf_context, thread_loop_callback)
static inline void sf_service_set_smart_polling_ex(SFContext *sf_context,
const FCSmartPollingConfig *smart_polling)
{
sf_context->smart_polling = *smart_polling;
}
#define sf_service_set_smart_polling(smart_polling) \
sf_service_set_smart_polling_ex(&g_sf_context, smart_polling)
static inline void sf_service_set_connect_need_log_ex(
SFContext *sf_context, const bool need_log)
{
sf_context->connect_need_log = need_log;
}
#define sf_service_set_connect_need_log(need_log) \
sf_service_set_connect_need_log_ex(&g_sf_context, need_log)
int sf_setup_signal_handler(); int sf_setup_signal_handler();
int sf_startup_schedule(pthread_t *schedule_tid); int sf_startup_schedule(pthread_t *schedule_tid);
int sf_add_slow_log_schedule(SFSlowLogContext *slowlog_ctx); int sf_add_slow_log_schedule(SFSlowLogContext *slowlog_ctx);
void sf_set_current_time(); void sf_set_current_time();
int sf_global_init(const char *log_filename_prefix);
int sf_socket_create_server(SFListener *listener,
int af, const char *bind_addr);
void sf_socket_close_server(SFListener *listener);
struct fast_task_info *sf_socket_accept_connection(SFListener *listener);
 int sf_socket_server_ex(SFContext *sf_context);
 #define sf_socket_server() sf_socket_server_ex(&g_sf_context)

-void sf_accept_loop_ex(SFContext *sf_context, const bool block);
+void sf_socket_close_ex(SFContext *sf_context);
+#define sf_socket_close() sf_socket_close_ex(&g_sf_context)
+
+int sf_accept_loop_ex(SFContext *sf_context, const bool blocked);
 #define sf_accept_loop() sf_accept_loop_ex(&g_sf_context, true)
@ -115,14 +150,13 @@ void sf_notify_all_threads_ex(SFContext *sf_context);
void sf_set_sig_quit_handler(sf_sig_quit_handler quit_handler); void sf_set_sig_quit_handler(sf_sig_quit_handler quit_handler);
int sf_init_task(struct fast_task_info *task); static inline struct fast_task_info *sf_alloc_init_task_ex(
SFNetworkHandler *handler, const int fd,
static inline struct fast_task_info *sf_alloc_init_task( const int reffer_count)
SFContext *sf_context, const int sock)
{ {
struct fast_task_info *task; struct fast_task_info *task;
task = free_queue_pop(); task = free_queue_pop(&handler->fh->ctx->free_queue);
if (task == NULL) { if (task == NULL) {
logError("file: "__FILE__", line: %d, " logError("file: "__FILE__", line: %d, "
"malloc task buff failed, you should " "malloc task buff failed, you should "
@ -130,19 +164,57 @@ static inline struct fast_task_info *sf_alloc_init_task(
__LINE__); __LINE__);
return NULL; return NULL;
} }
__sync_add_and_fetch(&task->reffer_count, 1);
if (task->shrinked) {
task->shrinked = false;
sf_proto_init_task_magic(task);
}
__sync_add_and_fetch(&task->reffer_count, reffer_count);
__sync_bool_compare_and_swap(&task->canceled, 1, 0); __sync_bool_compare_and_swap(&task->canceled, 1, 0);
task->ctx = sf_context; task->handler = handler;
task->event.fd = sock; task->event.fd = fd;
return task;
}
#define sf_hold_task_ex(task, inc_count) fc_hold_task_ex(task, inc_count)
#define sf_hold_task(task) fc_hold_task(task)
#define sf_alloc_init_task(handler, fd) sf_alloc_init_task_ex(handler, fd, 1)
static inline struct fast_task_info *sf_alloc_init_server_task(
SFNetworkHandler *handler, const int fd)
{
const int reffer_count = 1;
struct fast_task_info *task;
if ((task=sf_alloc_init_task_ex(handler, fd, reffer_count)) != NULL) {
#if IOEVENT_USE_URING
FC_URING_IS_CLIENT(task) = false;
#endif
}
return task; return task;
} }
#define sf_hold_task(task) __sync_add_and_fetch(&task->reffer_count, 1) static inline struct fast_task_info *sf_alloc_init_client_task(
SFNetworkHandler *handler)
{
const int fd = -1;
const int reffer_count = 1;
struct fast_task_info *task;
if ((task=sf_alloc_init_task_ex(handler, fd, reffer_count)) != NULL) {
#if IOEVENT_USE_URING
FC_URING_IS_CLIENT(task) = true;
#endif
}
return task;
}
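For the client-side variant, a hypothetical helper can pick the first enabled network handler from the default context and allocate a task whose fd is still unset; issuing the actual connect is out of scope here.

static struct fast_task_info *demo_alloc_connect_task(void)
{
    SFNetworkHandler *handler;

    if ((handler=sf_get_first_network_handler()) == NULL) {
        return NULL;   /* no enabled handler in g_sf_context */
    }
    /* event.fd stays -1, and FC_URING_IS_CLIENT is set when io_uring is used */
    return sf_alloc_init_client_task(handler);
}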
static inline void sf_release_task(struct fast_task_info *task) static inline void sf_release_task(struct fast_task_info *task)
{ {
//int reffer_count;
if (__sync_sub_and_fetch(&task->reffer_count, 1) == 0) { if (__sync_sub_and_fetch(&task->reffer_count, 1) == 0) {
/* /*
int free_count = free_queue_count(); int free_count = free_queue_count();
@ -152,13 +224,100 @@ static inline void sf_release_task(struct fast_task_info *task)
"used: %d, freed: %d", __LINE__, task, "used: %d, freed: %d", __LINE__, task,
alloc_count, alloc_count - free_count, free_count); alloc_count, alloc_count - free_count, free_count);
*/ */
#if IOEVENT_USE_URING
if (SF_CTX->use_io_uring) {
task->handler->close_connection(task);
__sync_fetch_and_sub(&g_sf_global_vars.
connection_stat.current_count, 1);
}
#endif
free_queue_push(task); free_queue_push(task);
} else { }
/* }
logInfo("file: "__FILE__", line: %d, "
"release task %p, current reffer: %d", static inline SFNetworkHandler *sf_get_first_network_handler_ex(
__LINE__, task, reffer_count); SFContext *sf_context)
*/ {
int i;
SFNetworkHandler *handler;
SFNetworkHandler *end;
for (i=0; i<SF_ADDRESS_FAMILY_COUNT; i++) {
if (sf_context->handlers[i].af == AF_UNSPEC) {
continue;
}
end = sf_context->handlers[i].handlers + SF_NETWORK_HANDLER_COUNT;
for (handler=sf_context->handlers[i].handlers; handler<end; handler++) {
if (handler->enabled) {
return handler;
}
}
}
return NULL;
}
#define sf_get_first_network_handler() \
sf_get_first_network_handler_ex(&g_sf_context)
static inline SFNetworkHandler *sf_get_rdma_network_handler(
SFContext *sf_context)
{
int i;
SFNetworkHandler *handler;
for (i=0; i<SF_ADDRESS_FAMILY_COUNT; i++) {
if (sf_context->handlers[i].af != AF_UNSPEC) {
handler = sf_context->handlers[i].handlers +
SF_RDMACM_NETWORK_HANDLER_INDEX;
if (handler->enabled) {
return handler;
}
}
}
return NULL;
}
static inline SFNetworkHandler *sf_get_rdma_network_handler2(
SFContext *sf_context1, SFContext *sf_context2)
{
SFNetworkHandler *handler;
if ((handler=sf_get_rdma_network_handler(sf_context1)) != NULL) {
return handler;
}
return sf_get_rdma_network_handler(sf_context2);
}
static inline SFNetworkHandler *sf_get_rdma_network_handler3(
SFContext *sf_context1, SFContext *sf_context2,
SFContext *sf_context3)
{
SFNetworkHandler *handler;
if ((handler=sf_get_rdma_network_handler(sf_context1)) != NULL) {
return handler;
}
if ((handler=sf_get_rdma_network_handler(sf_context2)) != NULL) {
return handler;
}
return sf_get_rdma_network_handler(sf_context3);
}
static inline bool sf_get_double_buffers_flag(FCServerGroupInfo *server_group)
{
if (server_group->comm_type == fc_comm_type_sock) {
#if IOEVENT_USE_URING
return true;
#else
return false;
#endif
} else { //RDMA
return true;
} }
} }


@ -119,6 +119,7 @@ int sf_sharding_htable_init_ex(SFHtableShardingContext *sharding_ctx,
const SFShardingHtableKeyType key_type, const SFShardingHtableKeyType key_type,
sf_sharding_htable_insert_callback insert_callback, sf_sharding_htable_insert_callback insert_callback,
sf_sharding_htable_find_callback find_callback, sf_sharding_htable_find_callback find_callback,
sf_sharding_htable_delete_callback delete_callback,
sf_sharding_htable_accept_reclaim_callback reclaim_callback, sf_sharding_htable_accept_reclaim_callback reclaim_callback,
const int sharding_count, const int64_t htable_capacity, const int sharding_count, const int64_t htable_capacity,
const int allocator_count, const int element_size, const int allocator_count, const int element_size,
@ -132,6 +133,7 @@ int sf_sharding_htable_init_ex(SFHtableShardingContext *sharding_ctx,
if (element_limit <= 0) { if (element_limit <= 0) {
element_limit = 1000 * 1000; element_limit = 1000 * 1000;
} }
if ((result=init_allocators(sharding_ctx, allocator_count, if ((result=init_allocators(sharding_ctx, allocator_count,
element_size, element_limit)) != 0) element_size, element_limit)) != 0)
{ {
@ -149,7 +151,10 @@ int sf_sharding_htable_init_ex(SFHtableShardingContext *sharding_ctx,
sharding_ctx->key_type = key_type; sharding_ctx->key_type = key_type;
sharding_ctx->insert_callback = insert_callback; sharding_ctx->insert_callback = insert_callback;
sharding_ctx->find_callback = find_callback; sharding_ctx->find_callback = find_callback;
sharding_ctx->delete_callback = delete_callback;
sharding_ctx->accept_reclaim_callback = reclaim_callback; sharding_ctx->accept_reclaim_callback = reclaim_callback;
sharding_ctx->sharding_reclaim.enabled = (delete_callback == NULL);
sharding_ctx->sharding_reclaim.elt_water_mark = sharding_ctx->sharding_reclaim.elt_water_mark =
per_elt_limit * low_water_mark_ratio; per_elt_limit * low_water_mark_ratio;
sharding_ctx->sharding_reclaim.min_ttl_ms = min_ttl_ms; sharding_ctx->sharding_reclaim.min_ttl_ms = min_ttl_ms;
@ -280,7 +285,7 @@ static SFShardingHashEntry *hash_entry_reclaim(SFHtableSharding *sharding)
} }
if (reclaim_count > 0) { if (reclaim_count > 0) {
logInfo("sharding index: %d, element_count: %"PRId64", " logDebug("sharding index: %d, element_count: %"PRId64", "
"reclaim_ttl_ms: %"PRId64" ms, reclaim_count: %"PRId64", " "reclaim_ttl_ms: %"PRId64" ms, reclaim_count: %"PRId64", "
"reclaim_limit: %"PRId64, (int)(sharding - sharding->ctx-> "reclaim_limit: %"PRId64, (int)(sharding - sharding->ctx->
sharding_array.entries), sharding->element_count, sharding_array.entries), sharding->element_count,
@ -291,14 +296,16 @@ static SFShardingHashEntry *hash_entry_reclaim(SFHtableSharding *sharding)
} }
static inline SFShardingHashEntry *htable_entry_alloc( static inline SFShardingHashEntry *htable_entry_alloc(
SFHtableShardingContext *sharding_ctx,
SFHtableSharding *sharding) SFHtableSharding *sharding)
{ {
SFShardingHashEntry *entry; SFShardingHashEntry *entry;
int64_t current_time_ms; int64_t current_time_ms;
int64_t last_reclaim_time_ms; int64_t last_reclaim_time_ms;
if (sharding->element_count > sharding->ctx-> if (sharding_ctx->sharding_reclaim.enabled &&
sharding_reclaim.elt_water_mark) (sharding->element_count > sharding->ctx->
sharding_reclaim.elt_water_mark))
{ {
current_time_ms = 1000LL * (int64_t)get_current_time(); current_time_ms = 1000LL * (int64_t)get_current_time();
last_reclaim_time_ms = FC_ATOMIC_GET(sharding->last_reclaim_time_ms); last_reclaim_time_ms = FC_ATOMIC_GET(sharding->last_reclaim_time_ms);
@ -325,7 +332,6 @@ static inline SFShardingHashEntry *htable_entry_alloc(
#define SET_SHARDING_AND_BUCKET(sharding_ctx, key) \ #define SET_SHARDING_AND_BUCKET(sharding_ctx, key) \
SFHtableSharding *sharding; \ SFHtableSharding *sharding; \
struct fc_list_head *bucket; \ struct fc_list_head *bucket; \
SFShardingHashEntry *entry; \
uint64_t hash_code; \ uint64_t hash_code; \
\ \
hash_code = sf_sharding_htable_key_ids_one == sharding_ctx-> \ hash_code = sf_sharding_htable_key_ids_one == sharding_ctx-> \
@ -340,6 +346,7 @@ void *sf_sharding_htable_find(SFHtableShardingContext
*sharding_ctx, const SFTwoIdsHashKey *key, void *arg) *sharding_ctx, const SFTwoIdsHashKey *key, void *arg)
{ {
void *data; void *data;
SFShardingHashEntry *entry;
SET_SHARDING_AND_BUCKET(sharding_ctx, key); SET_SHARDING_AND_BUCKET(sharding_ctx, key);
PTHREAD_MUTEX_LOCK(&sharding->lock); PTHREAD_MUTEX_LOCK(&sharding->lock);
@ -354,9 +361,43 @@ void *sf_sharding_htable_find(SFHtableShardingContext
return data; return data;
} }
int sf_sharding_htable_delete(SFHtableShardingContext
*sharding_ctx, const SFTwoIdsHashKey *key, void *arg)
{
int result;
SFShardingHashEntry *entry;
if (sharding_ctx->delete_callback != NULL) {
SET_SHARDING_AND_BUCKET(sharding_ctx, key);
PTHREAD_MUTEX_LOCK(&sharding->lock);
entry = htable_find(sharding_ctx, key, bucket);
if (entry != NULL) {
if (sharding_ctx->delete_callback(entry, arg)) {
fc_list_del_init(&entry->dlinks.htable);
if (sharding_ctx->sharding_reclaim.enabled) {
fc_list_del_init(&entry->dlinks.lru);
}
fast_mblock_free_object(sharding->allocator, entry);
sharding->element_count--;
}
result = 0;
} else {
result = ENOENT;
}
PTHREAD_MUTEX_UNLOCK(&sharding->lock);
} else {
logError("file: "__FILE__", line: %d, "
"delete callback is NULL!", __LINE__);
result = EINVAL;
}
return result;
}
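A small sketch of the delete path, assuming the table was initialized with a delete callback; the callback decides per entry whether removal really happens, and the trivial one below always consents. The names are made up for the example.

static bool demo_delete_callback(struct sf_sharding_hash_entry *entry, void *arg)
{
    return true;   /* always allow removal */
}

/* returns 0 when the key was found (and possibly removed), ENOENT otherwise */
static int demo_remove_entry(SFHtableShardingContext *ctx,
        const SFTwoIdsHashKey *key)
{
    return sf_sharding_htable_delete(ctx, key, NULL);
}

Note that registering a delete callback switches off LRU-based reclaim for the whole context (sharding_reclaim.enabled is set to delete_callback == NULL), so entries are then only removed explicitly.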
int sf_sharding_htable_insert(SFHtableShardingContext int sf_sharding_htable_insert(SFHtableShardingContext
*sharding_ctx, const SFTwoIdsHashKey *key, void *arg) *sharding_ctx, const SFTwoIdsHashKey *key, void *arg)
{ {
SFShardingHashEntry *entry;
bool new_create; bool new_create;
int result; int result;
SET_SHARDING_AND_BUCKET(sharding_ctx, key); SET_SHARDING_AND_BUCKET(sharding_ctx, key);
@ -364,7 +405,7 @@ int sf_sharding_htable_insert(SFHtableShardingContext
PTHREAD_MUTEX_LOCK(&sharding->lock); PTHREAD_MUTEX_LOCK(&sharding->lock);
do { do {
if ((entry=htable_find(sharding_ctx, key, bucket)) == NULL) { if ((entry=htable_find(sharding_ctx, key, bucket)) == NULL) {
if ((entry=htable_entry_alloc(sharding)) == NULL) { if ((entry=htable_entry_alloc(sharding_ctx, sharding)) == NULL) {
result = ENOMEM; result = ENOMEM;
break; break;
} }
@ -372,11 +413,15 @@ int sf_sharding_htable_insert(SFHtableShardingContext
new_create = true; new_create = true;
entry->key = *key; entry->key = *key;
htable_insert(sharding_ctx, entry, bucket); htable_insert(sharding_ctx, entry, bucket);
if (sharding_ctx->sharding_reclaim.enabled) {
fc_list_add_tail(&entry->dlinks.lru, &sharding->lru); fc_list_add_tail(&entry->dlinks.lru, &sharding->lru);
}
} else { } else {
new_create = false; new_create = false;
if (sharding_ctx->sharding_reclaim.enabled) {
fc_list_move_tail(&entry->dlinks.lru, &sharding->lru); fc_list_move_tail(&entry->dlinks.lru, &sharding->lru);
} }
}
entry->last_update_time_ms = 1000LL * (int64_t)get_current_time(); entry->last_update_time_ms = 1000LL * (int64_t)get_current_time();
result = sharding_ctx->insert_callback( result = sharding_ctx->insert_callback(


@ -18,7 +18,6 @@
#include <limits.h> #include <limits.h>
#include <sys/types.h> #include <sys/types.h>
#include <sys/stat.h>
#include "fastcommon/common_define.h" #include "fastcommon/common_define.h"
#include "fastcommon/fc_list.h" #include "fastcommon/fc_list.h"
#include "fastcommon/pthread_func.h" #include "fastcommon/pthread_func.h"
@ -37,6 +36,9 @@ typedef int (*sf_sharding_htable_insert_callback)
typedef void *(*sf_sharding_htable_find_callback) typedef void *(*sf_sharding_htable_find_callback)
(struct sf_sharding_hash_entry *entry, void *arg); (struct sf_sharding_hash_entry *entry, void *arg);
typedef bool (*sf_sharding_htable_delete_callback)
(struct sf_sharding_hash_entry *entry, void *arg);
typedef bool (*sf_sharding_htable_accept_reclaim_callback) typedef bool (*sf_sharding_htable_accept_reclaim_callback)
(struct sf_sharding_hash_entry *entry); (struct sf_sharding_hash_entry *entry);
@ -91,6 +93,7 @@ typedef struct sf_htable_sharding_context {
int64_t max_ttl_ms; int64_t max_ttl_ms;
double elt_ttl_ms; double elt_ttl_ms;
int elt_water_mark; //trigger reclaim when elements exceeds water mark int elt_water_mark; //trigger reclaim when elements exceeds water mark
bool enabled;
} sharding_reclaim; } sharding_reclaim;
struct { struct {
@ -101,6 +104,7 @@ typedef struct sf_htable_sharding_context {
SFShardingHtableKeyType key_type; //id count in the hash entry SFShardingHtableKeyType key_type; //id count in the hash entry
sf_sharding_htable_insert_callback insert_callback; sf_sharding_htable_insert_callback insert_callback;
sf_sharding_htable_find_callback find_callback; sf_sharding_htable_find_callback find_callback;
sf_sharding_htable_delete_callback delete_callback;
sf_sharding_htable_accept_reclaim_callback accept_reclaim_callback; sf_sharding_htable_accept_reclaim_callback accept_reclaim_callback;
SFHtableShardingArray sharding_array; SFHtableShardingArray sharding_array;
} SFHtableShardingContext; } SFHtableShardingContext;
@ -113,6 +117,7 @@ extern "C" {
const SFShardingHtableKeyType key_type, const SFShardingHtableKeyType key_type,
sf_sharding_htable_insert_callback insert_callback, sf_sharding_htable_insert_callback insert_callback,
sf_sharding_htable_find_callback find_callback, sf_sharding_htable_find_callback find_callback,
sf_sharding_htable_delete_callback delete_callback,
sf_sharding_htable_accept_reclaim_callback reclaim_callback, sf_sharding_htable_accept_reclaim_callback reclaim_callback,
const int sharding_count, const int64_t htable_capacity, const int sharding_count, const int64_t htable_capacity,
const int allocator_count, const int element_size, const int allocator_count, const int element_size,
@ -123,6 +128,7 @@ extern "C" {
*sharding_ctx, const SFShardingHtableKeyType key_type, *sharding_ctx, const SFShardingHtableKeyType key_type,
sf_sharding_htable_insert_callback insert_callback, sf_sharding_htable_insert_callback insert_callback,
sf_sharding_htable_find_callback find_callback, sf_sharding_htable_find_callback find_callback,
sf_sharding_htable_delete_callback delete_callback,
sf_sharding_htable_accept_reclaim_callback reclaim_callback, sf_sharding_htable_accept_reclaim_callback reclaim_callback,
const int sharding_count, const int64_t htable_capacity, const int sharding_count, const int64_t htable_capacity,
const int allocator_count, const int element_size, const int allocator_count, const int element_size,
@ -131,10 +137,10 @@ extern "C" {
{ {
const double low_water_mark_ratio = 0.10; const double low_water_mark_ratio = 0.10;
return sf_sharding_htable_init_ex(sharding_ctx, key_type, return sf_sharding_htable_init_ex(sharding_ctx, key_type,
insert_callback, find_callback, reclaim_callback, insert_callback, find_callback, delete_callback,
sharding_count, htable_capacity, allocator_count, reclaim_callback, sharding_count, htable_capacity,
element_size, element_limit, min_ttl_ms, max_ttl_ms, allocator_count, element_size, element_limit,
low_water_mark_ratio); min_ttl_ms, max_ttl_ms, low_water_mark_ratio);
} }
int sf_sharding_htable_insert(SFHtableShardingContext int sf_sharding_htable_insert(SFHtableShardingContext
@ -143,6 +149,9 @@ extern "C" {
void *sf_sharding_htable_find(SFHtableShardingContext void *sf_sharding_htable_find(SFHtableShardingContext
*sharding_ctx, const SFTwoIdsHashKey *key, void *arg); *sharding_ctx, const SFTwoIdsHashKey *key, void *arg);
int sf_sharding_htable_delete(SFHtableShardingContext
*sharding_ctx, const SFTwoIdsHashKey *key, void *arg);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

src/sf_shared_mbuffer.c (new file, 94 lines)

@ -0,0 +1,94 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the Lesser GNU General Public License, version 3
* or later ("LGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the Lesser GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include "sf_shared_mbuffer.h"
static int sf_shared_mbuffer_alloc_init(void *element, void *args)
{
SFSharedMBuffer *buffer;
buffer = (SFSharedMBuffer *)((char *)element +
sizeof(struct fast_allocator_wrapper));
buffer->ctx = (SFSharedMBufferContext *)args;
return 0;
}
int sf_shared_mbuffer_init_ex(SFSharedMBufferContext *context,
const char *name_prefix, const int buff_extra_size,
const int min_buff_size, const int max_buff_size,
const int min_alloc_once, const int64_t memory_limit,
const bool need_lock)
{
const double expect_usage_ratio = 0.75;
const int reclaim_interval = 1;
struct fast_region_info regions[32];
struct fast_mblock_object_callbacks object_callbacks;
int count;
int start;
int end;
int alloc_once;
int buff_size;
int i;
alloc_once = (4 * 1024 * 1024) / max_buff_size;
if (alloc_once == 0) {
alloc_once = min_alloc_once;
} else {
i = min_alloc_once;
while (i < alloc_once) {
i *= 2;
}
alloc_once = i;
}
count = 1;
buff_size = min_buff_size;
while (buff_size < max_buff_size) {
buff_size *= 2;
++count;
alloc_once *= 2;
}
buff_size = min_buff_size;
start = 0;
end = buff_extra_size + buff_size;
FAST_ALLOCATOR_INIT_REGION(regions[0], start, end,
end - start, alloc_once);
//logInfo("[1] start: %d, end: %d, alloc_once: %d", start, end, alloc_once);
start = end;
for (i=1; i<count; i++) {
buff_size *= 2;
alloc_once /= 2;
end = buff_extra_size + buff_size;
FAST_ALLOCATOR_INIT_REGION(regions[i], start, end,
end - start, alloc_once);
//logInfo("[%d] start: %d, end: %d, alloc_once: %d", i + 1, start, end, alloc_once);
start = end;
}
object_callbacks.init_func = sf_shared_mbuffer_alloc_init;
object_callbacks.destroy_func = NULL;
object_callbacks.args = context;
return fast_allocator_init_ex(&context->allocator, name_prefix,
sizeof(SFSharedMBuffer), &object_callbacks, regions, count,
memory_limit, expect_usage_ratio, reclaim_interval, need_lock);
}
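A sizing sketch with illustrative numbers to make the region layout concrete; the prefix, size limits, extra size, and memory budget below are assumptions, not defaults of this module.

static int demo_init_mbuffer_pool(SFSharedMBufferContext *ctx)
{
    /* with these numbers alloc_once = 4 MB / 64 KB = 64; min_alloc_once (2)
     * is doubled up to 64 and then doubled again for each size class, so
     * four regions cover payloads up to 8/16/32/64 KB (plus the header)
     * with batch sizes of 512/256/128/64 buffers, roughly 4 MB per batch */
    return sf_shared_mbuffer_init_ex(ctx, "demo", sizeof(SFSharedMBuffer),
            8 * 1024, 64 * 1024, 2, 256 * 1024 * 1024, true);
}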
void sf_shared_mbuffer_destroy(SFSharedMBufferContext *context)
{
fast_allocator_destroy(&context->allocator);
}

src/sf_shared_mbuffer.h (new file, 114 lines)

@ -0,0 +1,114 @@
/*
* Copyright (c) 2020 YuQing <384681@qq.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the Lesser GNU General Public License, version 3
* or later ("LGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the Lesser GNU General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef _SF_SHARED_MBUFFER_H__
#define _SF_SHARED_MBUFFER_H__
#include "fastcommon/fc_list.h"
#include "fastcommon/fast_task_queue.h"
#include "fastcommon/shared_func.h"
#include "fastcommon/logger.h"
#include "fastcommon/fast_allocator.h"
typedef struct sf_shared_mbuffer_context {
    struct fast_allocator_context allocator;
} SFSharedMBufferContext;

typedef struct sf_shared_mbuffer {
    int length;
    volatile int reffer_count;
    SFSharedMBufferContext *ctx;
    char buff[0]; //must be last
} SFSharedMBuffer;

#ifdef __cplusplus
extern "C" {
#endif

#define sf_shared_mbuffer_init(context, name_prefix, buff_extra_size, \
        min_buff_size, max_buff_size, min_alloc_once, memory_limit) \
    sf_shared_mbuffer_init_ex(context, name_prefix, buff_extra_size, \
            min_buff_size, max_buff_size, min_alloc_once, memory_limit, true)

int sf_shared_mbuffer_init_ex(SFSharedMBufferContext *context,
        const char *name_prefix, const int buff_extra_size,
        const int min_buff_size, const int max_buff_size,
        const int min_alloc_once, const int64_t memory_limit,
        const bool need_lock);

void sf_shared_mbuffer_destroy(SFSharedMBufferContext *context);

#define sf_shared_mbuffer_alloc(context, buffer_size) \
    sf_shared_mbuffer_alloc_ex(context, buffer_size, 1)

static inline SFSharedMBuffer *sf_shared_mbuffer_alloc_ex(
        SFSharedMBufferContext *context, const int buffer_size,
        const int init_reffer_count)
{
    SFSharedMBuffer *buffer;
    int sleep_ms;

    sleep_ms = 5;
    while ((buffer=fast_allocator_alloc(&context->allocator,
                    buffer_size)) == NULL)
    {
        if (sleep_ms < 100) {
            sleep_ms *= 2;
        }
        fc_sleep_ms(sleep_ms);
    }

    if (init_reffer_count > 0) {
        __sync_add_and_fetch(&buffer->reffer_count, init_reffer_count);
    }

    /*
    logInfo("file: "__FILE__", line: %d, "
            "alloc shared buffer: %p, buff: %p, reffer_count: %d",
            __LINE__, buffer, buffer->buff, __sync_add_and_fetch(&buffer->reffer_count, 0));
    */
    return buffer;
}

static inline void sf_shared_mbuffer_hold(SFSharedMBuffer *buffer)
{
    __sync_add_and_fetch(&buffer->reffer_count, 1);
}

static inline void sf_shared_mbuffer_release(SFSharedMBuffer *buffer)
{
    if (__sync_sub_and_fetch(&buffer->reffer_count, 1) == 0) {
        /*
        logInfo("file: "__FILE__", line: %d, "
                "free shared buffer: %p", __LINE__, buffer);
        */
        fast_allocator_free(&buffer->ctx->allocator, buffer);
    }
}

static inline void sf_release_task_shared_mbuffer(struct fast_task_info *task)
{
    SFSharedMBuffer *mbuffer;

    mbuffer = fc_list_entry(task->recv_body, SFSharedMBuffer, buff);
    sf_shared_mbuffer_release(mbuffer);
    task->recv_body = NULL;
}
#ifdef __cplusplus
}
#endif
#endif
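Taken together, this header gives a reference-counted buffer that several tasks can share: the sf_shared_mbuffer_alloc macro starts reffer_count at 1, sf_shared_mbuffer_hold adds an owner, and the last sf_shared_mbuffer_release returns the buffer to the fast allocator. A usage sketch follows; it assumes this header (and fastcommon) is on the include path, the sizes and the demo() wrapper are made up, and whether buffer_size must also cover buff_extra_size is not shown in the diff:

/* usage sketch only; parameter values are illustrative */
#include "sf_shared_mbuffer.h"

int demo(void)
{
    SFSharedMBufferContext ctx;
    SFSharedMBuffer *mbuffer;
    int result;

    /* assumed sizes: no extra bytes, 64 KB .. 1 MB buffers,
     * at least 4 per batch, 256 MB memory limit, with locking */
    if ((result=sf_shared_mbuffer_init(&ctx, "demo", 0, 64 * 1024,
                    1024 * 1024, 4, 256 * 1024 * 1024)) != 0)
    {
        return result;
    }

    /* payload size request; reffer_count starts at 1 via the macro */
    mbuffer = sf_shared_mbuffer_alloc(&ctx, 8 * 1024);

    sf_shared_mbuffer_hold(mbuffer);     /* second owner, e.g. another task */
    sf_shared_mbuffer_release(mbuffer);  /* second owner done */
    sf_shared_mbuffer_release(mbuffer);  /* count hits 0: back to the allocator */

    sf_shared_mbuffer_destroy(&ctx);
    return 0;
}

Note that sf_shared_mbuffer_alloc_ex never returns NULL: it retries with a backoff of up to 160 ms until the allocator can satisfy the request.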

View File

@ -34,41 +34,170 @@
#define SF_SERVER_TASK_TYPE_CHANNEL_HOLDER 101 //for request idempotency
#define SF_SERVER_TASK_TYPE_CHANNEL_USER 102 //for request idempotency

-typedef void (*sf_accept_done_callback)(struct fast_task_info *task,
-        const bool bInnerPort);
#define SF_ADDRESS_FAMILY_COUNT 2
#define SF_IPV4_ADDRESS_FAMILY_INDEX 0
#define SF_IPV6_ADDRESS_FAMILY_INDEX 1

#define SF_NETWORK_HANDLER_COUNT 2
#define SF_SOCKET_NETWORK_HANDLER_INDEX 0
#define SF_RDMACM_NETWORK_HANDLER_INDEX 1

#define SF_BINLOG_BUFFER_PRODUCER_DATA_LENGTH(bf) ((bf).data_end - (bf).buff)
#define SF_BINLOG_BUFFER_PRODUCER_BUFF_REMAIN(bf) ((bf).buff_end - (bf).data_end)
#define SF_BINLOG_BUFFER_CONSUMER_DATA_LENGTH(bf) ((bf).current - (bf).buff)
#define SF_BINLOG_BUFFER_CONSUMER_DATA_REMAIN(bf) ((bf).data_end - (bf).current)

typedef int (*sf_accept_done_callback)(struct fast_task_info *task,
        const in_addr_64_t client_addr, const bool bInnerPort);

typedef int (*sf_set_body_length_callback)(struct fast_task_info *task);

-typedef int (*sf_deal_task_func)(struct fast_task_info *task, const int stage);
typedef char *(*sf_alloc_recv_buffer_callback)(struct fast_task_info *task,
        const int buff_size, bool *new_alloc);

typedef int (*sf_deal_task_callback)(struct fast_task_info *task, const int stage);

typedef int (*sf_recv_timeout_callback)(struct fast_task_info *task);

typedef int (*sf_send_done_callback)(struct fast_task_info *task,
        const int length, int *next_stage);

typedef void (*sf_connect_done_callback)(struct fast_task_info *task,
        const int err_no);

/* calback for release iovec buffer */
typedef void (*sf_release_buffer_callback)(struct fast_task_info *task);

typedef int (*sf_error_handler_callback)(const int errnum);

typedef enum {
    sf_comm_action_continue = 'c',
    sf_comm_action_break = 'b',
    sf_comm_action_finish = 'f'
} SFCommAction;

typedef enum {
    sf_address_family_auto = 0,
    sf_address_family_ipv4 = 1,
    sf_address_family_ipv6 = 2,
    sf_address_family_both = 3
} SFAddressFamily;

struct ibv_pd;
struct sf_listener;

typedef int (*sf_get_connection_size_callback)();
typedef int (*sf_init_connection_callback)(
        struct fast_task_info *task, void *arg);
#define sf_alloc_pd_callback fc_alloc_pd_callback
typedef int (*sf_create_server_callback)(struct sf_listener
        *listener, int af, const char *bind_addr);
typedef void (*sf_close_server_callback)(struct sf_listener *listener);
typedef struct fast_task_info * (*sf_accept_connection_callback)(
        struct sf_listener *listener);
typedef int (*sf_async_connect_server_callback)(struct fast_task_info *task);
typedef int (*sf_async_connect_check_callback)(struct fast_task_info *task);
typedef void (*sf_close_connection_callback)(struct fast_task_info *task);
typedef ssize_t (*sf_send_data_callback)(struct fast_task_info *task,
        SFCommAction *action, bool *send_done);
typedef ssize_t (*sf_recv_data_callback)(struct fast_task_info *task,
        const bool call_post_recv, SFCommAction *action);
typedef int (*sf_post_recv_callback)(struct fast_task_info *task);

struct sf_network_handler;
typedef struct sf_listener {
    struct sf_network_handler *handler;
    int port;
    bool enabled;
    bool is_inner;
    union {
        int sock; //for socket
        void *id; //for rdma_cm
    };
    struct sockaddr_in inaddr; //for accept
} SFListener;

struct sf_context;
struct sf_address_family_handler;
typedef struct sf_network_handler {
    bool enabled;
    bool explicit_post_recv;
    FCCommunicationType comm_type;
    struct sf_address_family_handler *fh;
    struct ibv_pd *pd;
    SFListener inner;
    SFListener outer;

    /* for server side */
    sf_get_connection_size_callback get_connection_size;
    sf_init_connection_callback init_connection;
    sf_alloc_pd_callback alloc_pd;
    sf_create_server_callback create_server;
    sf_close_server_callback close_server;
    sf_accept_connection_callback accept_connection;

    /* for client side */
    sf_async_connect_server_callback async_connect_server;
    sf_async_connect_check_callback async_connect_check;

    /* server and client both */
    sf_close_connection_callback close_connection;
    sf_send_data_callback send_data;
    sf_recv_data_callback recv_data;
    sf_post_recv_callback post_recv; //for rdma
} SFNetworkHandler;

typedef struct sf_nio_callbacks {
    TaskCleanUpCallback task_cleanup;
    sf_deal_task_callback deal_task;
    sf_set_body_length_callback set_body_length;
    sf_alloc_recv_buffer_callback alloc_recv_buffer;
    sf_accept_done_callback accept_done;
    sf_connect_done_callback connect_done;
    sf_send_done_callback send_done;
    sf_recv_timeout_callback task_timeout;
    sf_release_buffer_callback release_buffer;
} SFNIOCallbacks;

typedef struct sf_address_family_handler {
    int af; //AF_UNSPEC for disabled
    SFNetworkHandler handlers[SF_NETWORK_HANDLER_COUNT];
    char inner_bind_addr[IP_ADDRESS_SIZE];
    char outer_bind_addr[IP_ADDRESS_SIZE];
    struct sf_context *ctx;
} SFAddressFamilyHandler;

typedef struct sf_net_buffer_config {
    int connect_timeout;
    int network_timeout;
    int max_connections;
    int max_pkg_size;
    int min_buff_size;
    int max_buff_size;
} SFNetBufferConfig;

typedef struct sf_context {
    char name[64];
    struct nio_thread_data *thread_data;
    volatile int thread_count;
-    int outer_sock;
-    int inner_sock;
-    int outer_port;
-    int inner_port;
    bool is_client; //since v1.2.5
    bool use_io_uring; //since v1.2.9
    bool use_send_zc; //since v1.2.9
    SFAddressFamily address_family;
    SFAddressFamilyHandler handlers[SF_ADDRESS_FAMILY_COUNT];
    SFNetBufferConfig net_buffer_cfg;
    int accept_threads;
    int work_threads;
-    char inner_bind_addr[IP_ADDRESS_SIZE];
-    char outer_bind_addr[IP_ADDRESS_SIZE];
    int header_size;
-    bool remove_from_ready_list;
    bool realloc_task_buffer;
-    sf_deal_task_func deal_task;
-    sf_set_body_length_callback set_body_length;
-    sf_accept_done_callback accept_done_func;
-    TaskCleanUpCallback task_cleanup_func;
-    sf_recv_timeout_callback timeout_callback;
-    sf_release_buffer_callback release_buffer_callback;
    bool connect_need_log; //for client connect
    FCSmartPollingConfig smart_polling;
    SFNIOCallbacks callbacks;
    struct fast_task_queue free_queue;
} SFContext;

typedef struct {
@ -110,7 +239,8 @@ typedef struct sf_binlog_file_position {
typedef struct server_binlog_buffer {
    char *buff; //the buffer pointer
    char *current; //for the consumer
-    char *end; //data end ptr
    char *data_end; //data end ptr
    char *buff_end; //buffer end ptr
    int size; //the buffer size (capacity)
} SFBinlogBuffer;
@ -237,11 +367,41 @@ typedef struct sf_cluster_config {
typedef struct sf_synchronize_context {
    pthread_lock_cond_pair_t lcp;
    int result;
    union {
        bool finished;
-        int result;
        bool ready;
        int waiting_count;
    };
} SFSynchronizeContext;

typedef enum sf_election_quorum {
    sf_election_quorum_auto,
    sf_election_quorum_any,
    sf_election_quorum_majority
} SFElectionQuorum;

typedef enum sf_replication_quorum {
    sf_replication_quorum_auto,
    sf_replication_quorum_any,
    sf_replication_quorum_majority,
    sf_replication_quorum_smart
} SFReplicationQuorum;

typedef struct sf_block_key {
    int64_t oid; //object id
    int64_t offset; //aligned by block size
    uint64_t hash_code;
} SFBlockKey;

typedef struct sf_slice_size {
    int offset; //offset within the block
    int length; //slice length
} SFSliceSize;

typedef struct sf_block_slice_key_info {
    SFBlockKey block;
    SFSliceSize slice;
} SFBlockSliceKeyInfo;

#endif
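The renamed data_end field and the new buff_end field pair with the SF_BINLOG_BUFFER_PRODUCER_*/CONSUMER_* macros added near the top of this file: the producer tracks written bytes against buff_end, the consumer tracks read bytes against data_end. A self-contained sketch of that arithmetic (the macros are copied from the diff above; the DemoBinlogBuffer name, storage size, and offsets are made up):

/* sketch of the producer/consumer bookkeeping; values are illustrative */
#include <stdio.h>

typedef struct {
    char *buff;     //the buffer pointer
    char *current;  //for the consumer
    char *data_end; //data end ptr
    char *buff_end; //buffer end ptr
    int size;       //the buffer size (capacity)
} DemoBinlogBuffer;

#define SF_BINLOG_BUFFER_PRODUCER_DATA_LENGTH(bf) ((bf).data_end - (bf).buff)
#define SF_BINLOG_BUFFER_PRODUCER_BUFF_REMAIN(bf) ((bf).buff_end - (bf).data_end)
#define SF_BINLOG_BUFFER_CONSUMER_DATA_LENGTH(bf) ((bf).current - (bf).buff)
#define SF_BINLOG_BUFFER_CONSUMER_DATA_REMAIN(bf) ((bf).data_end - (bf).current)

int main(void)
{
    char storage[1024];
    DemoBinlogBuffer bf;

    bf.buff = storage;
    bf.size = sizeof(storage);
    bf.buff_end = bf.buff + bf.size; /* fixed capacity boundary */
    bf.data_end = bf.buff + 600;     /* producer has written 600 bytes */
    bf.current = bf.buff + 250;      /* consumer has read 250 bytes */

    printf("produced: %d, space left: %d, consumed: %d, unread: %d\n",
            (int)SF_BINLOG_BUFFER_PRODUCER_DATA_LENGTH(bf),
            (int)SF_BINLOG_BUFFER_PRODUCER_BUFF_REMAIN(bf),
            (int)SF_BINLOG_BUFFER_CONSUMER_DATA_LENGTH(bf),
            (int)SF_BINLOG_BUFFER_CONSUMER_DATA_REMAIN(bf));
    /* prints: produced: 600, space left: 424, consumed: 250, unread: 350 */
    return 0;
}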

View File

@ -281,7 +281,7 @@ int sf_logger_init(LogContext *pContext, const char *filename_prefix)
    return 0;
}

-ScheduleEntry *sf_logger_set_schedule_entry(struct log_context *pContext,
ScheduleEntry *sf_logger_set_schedule_entries(struct log_context *pContext,
        SFLogConfig *log_cfg, ScheduleEntry *pScheduleEntry)
{
    INIT_SCHEDULE_ENTRY(*pScheduleEntry, sched_generate_next_id(),
@ -311,6 +311,8 @@ ScheduleEntry *sf_logger_set_schedule_entry(struct log_context *pContext,
const char *sf_strerror(const int errnum)
{
    switch (errnum) {
        case SF_CLUSTER_ERROR_BINLOG_MISSED:
            return "binlog missed";
        case SF_CLUSTER_ERROR_BINLOG_INCONSISTENT:
            return "binlog inconsistent";
        case SF_CLUSTER_ERROR_LEADER_INCONSISTENT:
@ -329,6 +331,8 @@ const char *sf_strerror(const int errnum)
            return STRERROR(EINVAL);
        case SF_ERROR_EAGAIN:
            return STRERROR(EAGAIN);
        case SF_ERROR_EINPROGRESS:
            return STRERROR(EINPROGRESS);
        case SF_ERROR_EOVERFLOW:
            return STRERROR(EOVERFLOW);
        case SF_ERROR_ENODATA:

View File

@ -96,14 +96,14 @@ void sf_parse_cmd_option_bool(int argc, char *argv[],
int sf_logger_init(LogContext *pContext, const char *filename_prefix);

-ScheduleEntry *sf_logger_set_schedule_entry(struct log_context *pContext,
ScheduleEntry *sf_logger_set_schedule_entries(struct log_context *pContext,
        SFLogConfig *log_cfg, ScheduleEntry *pScheduleEntry);

-static inline void sf_setup_schedule(struct log_context *pContext,
static inline void sf_logger_setup_schedule(struct log_context *pContext,
        SFLogConfig *log_cfg, ScheduleArray *scheduleArray)
{
    ScheduleEntry *scheduleEntry;

-    scheduleEntry = sf_logger_set_schedule_entry(pContext,
    scheduleEntry = sf_logger_set_schedule_entries(pContext,
            log_cfg, scheduleArray->entries);
    scheduleArray->count = scheduleEntry - scheduleArray->entries;
}
@ -117,12 +117,20 @@ static inline int sf_unify_errno(const int errnum)
            return SF_ERROR_EINVAL;
        case EAGAIN:
            return SF_ERROR_EAGAIN;
        case EINPROGRESS:
            return SF_ERROR_EINPROGRESS;
        case EOVERFLOW:
            return SF_ERROR_EOVERFLOW;
        case EOPNOTSUPP:
            return SF_ERROR_EOPNOTSUPP;
        case ENODATA:
            return SF_ERROR_ENODATA;
        case ENOLINK:
            return SF_ERROR_ENOLINK;
        case ENOTEMPTY:
            return SF_ERROR_ENOTEMPTY;
        case ELOOP:
            return SF_ERROR_ELOOP;
        default:
            return errnum;
    }
@ -141,6 +149,8 @@ static inline int sf_localize_errno(int errnum)
            return EINVAL;
        case SF_ERROR_EAGAIN:
            return EAGAIN;
        case SF_ERROR_EINPROGRESS:
            return EINPROGRESS;
        case SF_ERROR_EOVERFLOW:
            return EOVERFLOW;
        case SF_ERROR_EOPNOTSUPP:
@ -149,6 +159,12 @@ static inline int sf_localize_errno(int errnum)
            return ENODATA;
        case SF_SESSION_ERROR_NOT_EXIST:
            return EPERM;
        case SF_ERROR_ENOLINK:
            return ENOLINK;
        case SF_ERROR_ENOTEMPTY:
            return ENOTEMPTY;
        case SF_ERROR_ELOOP:
            return ELOOP;
        default:
            return errnum;
    }
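The EINPROGRESS, ENOLINK, ENOTEMPTY and ELOOP additions keep the mapping symmetric: each new case in sf_unify_errno has a matching case in sf_localize_errno. A tiny sketch of that round trip; the demo_* functions and the DEMO_SF_ERROR_* values are placeholders, not the library's real SF_ERROR_* constants:

/* round-trip sketch with placeholder constants */
#include <errno.h>
#include <assert.h>

enum {
    DEMO_SF_ERROR_EINPROGRESS = 9991, /* placeholder value */
    DEMO_SF_ERROR_ELOOP       = 9992  /* placeholder value */
};

static int demo_unify_errno(const int errnum)
{
    switch (errnum) {
        case EINPROGRESS:
            return DEMO_SF_ERROR_EINPROGRESS;
        case ELOOP:
            return DEMO_SF_ERROR_ELOOP;
        default:
            return errnum;
    }
}

static int demo_localize_errno(const int errnum)
{
    switch (errnum) {
        case DEMO_SF_ERROR_EINPROGRESS:
            return EINPROGRESS;
        case DEMO_SF_ERROR_ELOOP:
            return ELOOP;
        default:
            return errnum;
    }
}

int main(void)
{
    /* unify a local errno, then localize it back: the value survives */
    assert(demo_localize_errno(demo_unify_errno(EINPROGRESS)) == EINPROGRESS);
    assert(demo_localize_errno(demo_unify_errno(ELOOP)) == ELOOP);
    return 0;
}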