Compare commits
No commits in common. "master" and "V6.05" have entirely different histories.
@@ -7,6 +7,9 @@ client/Makefile
 # client/fdfs_link_library.sh.in
 client/fdfs_link_library.sh
 
+# Prerequisites
+*.d
+
 # Compiled Object files
 *.slo
 *.lo
@@ -101,8 +104,3 @@ php_client/run-tests.php
 # fastdfs runtime paths
 data/
 logs/
-
-# others
-*.pid
-*.swp
-*.swo

HISTORY (59 changed lines)
@@ -1,63 +1,4 @@
 
-Version 6.12.2 2024-09-16
-* use libfastcommon V1.75 and libserverframe 1.2.5
-
-Version 6.12.1 2024-03-06
-* adapt to libserverframe 1.2.3
-* bugfixed: notify_leader_changed support IPv6 correctly
-* log square quoted IPv6 address
-
-Version 6.12.0 2024-02-12
-* bugfixed: parse ip and port use parseAddress instead of splitEx
-* bugfixed: fdfs_server_info_to_string support IPv6 correctly
-* check filename duplicate by hashtable instead of file system access
-
-Version 6.11.0 2023-12-10
-* support IPv6, config item: address_family in tracker.conf and storage.conf
-use libfastcommon V1.71 and libserverframe 1.2.1
-* storage.conf can specify the storage server ID for NAT network
-
-Version 6.10.0 2023-09-07
-* use libfastcommon V1.70 and libserverframe 1.2.0
-
-Version 6.9.5 2023-06-05
-* fix possible out-of-bounds issues with array access
-* fix realloc mistakes to avoid memory leaks
-* add ExecStartPost=/bin/sleep 0.1 to systemd service files
-* fdht_client/fdht_func.c: fixed compile error
-
-Version 6.9.4 2023-02-15
-* use epoll edge trigger to resolve github issues #608
-* bugfixed: report connections' current_count and max_count correctly
-
-Version 6.9.3 2022-12-24
-* use prctl to set pthread name under Linux
-
-Version 6.9.2 2022-11-28
-* space size such as total_mb and free_mb use int64_t instead of int
-* bugfixed: log connection ip_addr and port correctly
-* output port with format %u instead %d
-
-Version 6.9.1 2022-11-25
-* bugfixed: clear task extra data correctly when the connection broken
-
-Version 6.09 2022-09-14
-* use libfastcommon V1.60 and libserverframe 1.1.19
-* use atomic counter instead of mutex lock
-
-Version 6.08 2022-06-21
-* use libfastcommon V1.56
-NOTE: you MUST upgrade libfastcommon to V1.56 or later
-
-Version 6.07 2020-12-31
-* use libfastcommon V1.44
-NOTE: you MUST upgrade libfastcommon to V1.44 or later
-* correct spell iovent to ioevent follows libfastcommon
-
-Version 6.06 2019-12-30
-* bugfixed: fdfs_storaged can't quit normally
-* bugfixed: init/memset return ip address to ascii 0 for Java SDK
-
 Version 6.05 2019-12-25
 * fdfs_trackerd and fdfs_storaged print the server version in usage.
 you can execute fdfs_trackerd or fdfs_storaged without parameters

INSTALL (25 changed lines)
@@ -11,37 +11,28 @@ Chinese language: http://www.fastken.com/
 # command lines as:
 
 git clone https://github.com/happyfish100/libfastcommon.git
-cd libfastcommon; git checkout V1.0.75
+cd libfastcommon; git checkout V1.0.43
 ./make.sh clean && ./make.sh && ./make.sh install
 
 
-# step 2. download libserverframe source codes and install it,
-# github address: https://github.com/happyfish100/libserverframe.git
-# gitee address: https://gitee.com/fastdfs100/libserverframe.git
-# command lines as:
-
-git clone https://github.com/happyfish100/libserverframe.git
-cd libserverframe; git checkout V1.2.5
-./make.sh clean && ./make.sh && ./make.sh install
-
-# step 3. download fastdfs source codes and install it,
+# step 2. download fastdfs source codes and install it,
 # github address: https://github.com/happyfish100/fastdfs.git
 # gitee address: https://gitee.com/fastdfs100/fastdfs.git
 # command lines as:
 
 git clone https://github.com/happyfish100/fastdfs.git
-cd fastdfs; git checkout V6.12.2
+cd fastdfs; git checkout V6.05
 ./make.sh clean && ./make.sh && ./make.sh install
 
 
-# step 4. setup the config files
+# step 3. setup the config files
 # the setup script does NOT overwrite existing config files,
 # please feel free to execute this script (take easy :)
 
 ./setup.sh /etc/fdfs
 
 
-# step 5. edit or modify the config files of tracker, storage and client
+# step 4. edit or modify the config files of tracker, storage and client
 such as:
 vi /etc/fdfs/tracker.conf
 vi /etc/fdfs/storage.conf
@@ -50,7 +41,7 @@ such as:
 and so on ...
 
 
-# step 6. run the server programs
+# step 5. run the server programs
 # start the tracker server:
 /usr/bin/fdfs_trackerd /etc/fdfs/tracker.conf restart
 
@@ -62,12 +53,12 @@ such as:
 /sbin/service fdfs_storaged restart
 
 
-# step 7. (optional) run monitor program
+# step 6. (optional) run monitor program
 # such as:
 /usr/bin/fdfs_monitor /etc/fdfs/client.conf
 
 
-# step 8. (optional) run the test program
+# step 7. (optional) run the test program
 # such as:
 /usr/bin/fdfs_test <client_conf_filename> <operation>
 /usr/bin/fdfs_test1 <client_conf_filename> <operation>

README.md (11 changed lines)
@@ -6,7 +6,7 @@ Please visit the FastDFS Home Page for more detail.
 Chinese language: http://www.fastken.com/
 
 
-FastDFS is an open source high performance distributed file system. Its major
+FastDFS is an open source high performance distributed file system. It's major
 functions include: file storing, file syncing and file accessing (file uploading
 and file downloading), and it can resolve the high capacity and load balancing
 problem. FastDFS should meet the requirement of the website whose service based
@@ -44,12 +44,3 @@ Client test code use client library please refer to the directory: client/test.
 
 For more FastDFS related articles, please subscribe the Wechat/Weixin public account
 (Chinese Language): fastdfs
-
-FastDFS is a lightweight object storage solution. If you need a general distributed
-file system for databases, K8s and virtual machines (such as KVM), you can learn about
-[FastCFS](https://github.com/happyfish100/FastCFS) which achieves strong data consistency
-and high performance.
-
-We provide technical support service and customized development. Welcome to use WeChat or email for discuss.
-
-email: 384681(at)qq(dot)com

README_zh.md (16 changed lines)
@@ -1,14 +1,13 @@
-FastDFS是一款开源的分布式文件系统,功能主要包括:文件存储、文件同步、文件访问(文件上传、文件下载)等,解决了文件大容量存储和高性能访问的问题。FastDFS特别适合以文件为载体的在线服务,如图片、视频、文档等等服务。
+FastDFS是一款开源的分布式文件系统,功能主要包括:文件存储、文件同步、文件访问(文件上传、文件下载)等,解决了文件大容量存储和高性能访问的问题。FastDFS特别适合以文件为载体的在线服务,如图片、视频、文档等等。
 
-FastDFS作为一款轻量级分布式文件系统,版本V6.01代码量6.3万行。FastDFS用C语言实现,支持Linux、FreeBSD、MacOS等类UNIX系统。FastDFS类似google FS,属于应用级文件系统,不是通用的文件系统,只能通过专有API访问,目前提供了C客户端和Java SDK,以及PHP扩展SDK。
+FastDFS作为一款轻量级分布式文件系统,版本V6.01代码量6.3万行。FastDFS用C语言实现,支持Linux、FreeBSD、MacOS等类UNIX系统。FastDFS类似google FS,属于应用级文件系统,不是通用的文件系统,只能通过专有API访问,目前提供了C和Java SDK,以及PHP扩展SDK。
 
-FastDFS为互联网应用量身定做,解决大容量文件存储问题,实现高性能和高扩展性。FastDFS可以看做是基于文件的key value存储系统,key为文件ID,value为文件本身,因此称作分布式文件存储服务更为合适。
+FastDFS为互联网应用量身定做,解决大容量文件存储问题,追求高性能和高扩展性。FastDFS可以看做是基于文件的key value存储系统,key为文件ID,value为文件内容,因此称作分布式文件存储服务更为合适。
 
 FastDFS的架构比较简单,如下图所示:
 
 
-```
-FastDFS特点:
+FastDFS特点如下:
 1)分组存储,简单灵活;
 2)对等结构,不存在单点;
 3)文件ID由FastDFS生成,作为文件访问凭证。FastDFS不需要传统的name server或meta server;
@@ -17,12 +16,5 @@
 6)提供了nginx扩展模块,可以和nginx无缝衔接;
 7)支持多线程方式上传和下载文件,支持断点续传;
 8)存储服务器上可以保存文件附加属性。
-```
 
 FastDFS更多更详细的功能和特性介绍,请参阅FastDFS微信公众号的其他文章,搜索公众号:fastdfs。
-
-FastDFS是轻量级的对象存储解决方案,如果你在数据库、K8s和虚拟机(如KVM)等场景,需要使用通用分布式文件系统,可以了解一下保证数据强一致性且高性能的[FastCFS](https://gitee.com/fastdfs100/FastCFS)。
-
-我们提供商业技术支持和定制化开发,欢迎微信或邮件洽谈。
-
-email: 384681(at)qq(dot)com

@@ -4,7 +4,7 @@ COMPILE = $(CC) $(CFLAGS)
 ENABLE_STATIC_LIB = $(ENABLE_STATIC_LIB)
 ENABLE_SHARED_LIB = $(ENABLE_SHARED_LIB)
 INC_PATH = -I../common -I../tracker -I/usr/include/fastcommon
-LIB_PATH = $(LIBS) -lfastcommon -lserverframe
+LIB_PATH = $(LIBS) -lfastcommon
 TARGET_PATH = $(TARGET_PREFIX)/bin
 TARGET_LIB = $(TARGET_PREFIX)/$(LIB_VERSION)
 TARGET_INC = $(TARGET_PREFIX)/include
@@ -49,7 +49,6 @@ CLIENT_SHARED_LIBS = libfdfsclient.so
 ALL_LIBS = $(STATIC_LIBS) $(SHARED_LIBS)
 
 all: $(ALL_OBJS) $(ALL_PRGS) $(ALL_LIBS)
-
 libfdfsclient.so:
 $(COMPILE) -o $@ $< -shared $(FDFS_SHARED_OBJS) $(LIB_PATH)
 libfdfsclient.a:
@@ -68,12 +67,12 @@ install:
 mkdir -p $(TARGET_LIB)
 mkdir -p $(TARGET_PREFIX)/lib
 cp -f $(ALL_PRGS) $(TARGET_PATH)
-if [ $(ENABLE_STATIC_LIB) -eq 1 ]; then cp -f $(STATIC_LIBS) $(TARGET_LIB); cp -f $(STATIC_LIBS) $(TARGET_PREFIX)/lib/; fi
-if [ $(ENABLE_SHARED_LIB) -eq 1 ]; then cp -f $(CLIENT_SHARED_LIBS) $(TARGET_LIB); cp -f $(CLIENT_SHARED_LIBS) $(TARGET_PREFIX)/lib/; fi
+if [ $(ENABLE_STATIC_LIB) -eq 1 ]; then cp -f $(STATIC_LIBS) $(TARGET_LIB); cp -f $(STATIC_LIBS) $(TARGET_PREFIX)/lib/;fi
+if [ $(ENABLE_SHARED_LIB) -eq 1 ]; then cp -f $(CLIENT_SHARED_LIBS) $(TARGET_LIB); cp -f $(CLIENT_SHARED_LIBS) $(TARGET_PREFIX)/lib/;fi
 
 mkdir -p $(TARGET_INC)/fastdfs
 cp -f $(FDFS_HEADER_FILES) $(TARGET_INC)/fastdfs
-if [ ! -f $(CONFIG_PATH)/client.conf ]; then cp -f ../conf/client.conf $(CONFIG_PATH)/client.conf; fi
+if [ ! -f $(CONFIG_PATH)/client.conf.sample ]; then cp -f ../conf/client.conf $(CONFIG_PATH)/client.conf.sample; fi
 clean:
 rm -f $(ALL_OBJS) $(ALL_PRGS) $(ALL_LIBS)
 

@@ -134,18 +134,17 @@ static int copy_tracker_servers(TrackerServerGroup *pTrackerGroup,
 }
 }
 
 /*
 {
 TrackerServerInfo *pServer;
-char formatted_ip[FORMATTED_IP_SIZE];
-for (pServer=pTrackerGroup->servers; pServer<pTrackerGroup->servers+
+for (pServer=pTrackerGroup->servers; pServer<pTrackerGroup->servers+ \
 pTrackerGroup->server_count; pServer++)
 {
-format_ip_address(pServer->connections[0].ip_addr, formatted_ip);
-printf("server=%s:%u\n", formatted_ip, pServer->connections[0].port);
+//printf("server=%s:%d\n", \
+pServer->ip_addr, pServer->port);
 }
 }
 */
 
 return 0;
 }
@@ -242,7 +241,7 @@ static int fdfs_get_params_from_tracker(bool *use_storage_id)
 
 continue_flag = false;
 if ((result=fdfs_get_ini_context_from_tracker(&g_tracker_group,
-&iniContext, &continue_flag)) != 0)
+&iniContext, &continue_flag, false, NULL)) != 0)
 {
 return result;
 }
@@ -271,41 +270,41 @@ static int fdfs_client_do_init_ex(TrackerServerGroup *pTrackerGroup, \
 pBasePath = iniGetStrValue(NULL, "base_path", iniContext);
 if (pBasePath == NULL)
 {
-strcpy(SF_G_BASE_PATH_STR, "/tmp");
+strcpy(g_fdfs_base_path, "/tmp");
 }
 else
 {
-snprintf(SF_G_BASE_PATH_STR, sizeof(SF_G_BASE_PATH_STR),
+snprintf(g_fdfs_base_path, sizeof(g_fdfs_base_path),
 "%s", pBasePath);
-chopPath(SF_G_BASE_PATH_STR);
-if (!fileExists(SF_G_BASE_PATH_STR))
+chopPath(g_fdfs_base_path);
+if (!fileExists(g_fdfs_base_path))
 {
 logError("file: "__FILE__", line: %d, " \
 "\"%s\" can't be accessed, error info: %s", \
-__LINE__, SF_G_BASE_PATH_STR, STRERROR(errno));
+__LINE__, g_fdfs_base_path, STRERROR(errno));
 return errno != 0 ? errno : ENOENT;
 }
-if (!isDir(SF_G_BASE_PATH_STR))
+if (!isDir(g_fdfs_base_path))
 {
 logError("file: "__FILE__", line: %d, " \
 "\"%s\" is not a directory!", \
-__LINE__, SF_G_BASE_PATH_STR);
+__LINE__, g_fdfs_base_path);
 return ENOTDIR;
 }
 }
 
-SF_G_CONNECT_TIMEOUT = iniGetIntValue(NULL, "connect_timeout", \
+g_fdfs_connect_timeout = iniGetIntValue(NULL, "connect_timeout", \
 iniContext, DEFAULT_CONNECT_TIMEOUT);
-if (SF_G_CONNECT_TIMEOUT <= 0)
+if (g_fdfs_connect_timeout <= 0)
 {
-SF_G_CONNECT_TIMEOUT = DEFAULT_CONNECT_TIMEOUT;
+g_fdfs_connect_timeout = DEFAULT_CONNECT_TIMEOUT;
 }
 
-SF_G_NETWORK_TIMEOUT = iniGetIntValue(NULL, "network_timeout", \
+g_fdfs_network_timeout = iniGetIntValue(NULL, "network_timeout", \
 iniContext, DEFAULT_NETWORK_TIMEOUT);
-if (SF_G_NETWORK_TIMEOUT <= 0)
+if (g_fdfs_network_timeout <= 0)
 {
-SF_G_NETWORK_TIMEOUT = DEFAULT_NETWORK_TIMEOUT;
+g_fdfs_network_timeout = DEFAULT_NETWORK_TIMEOUT;
 }
 
 if ((result=fdfs_load_tracker_group_ex(pTrackerGroup, \
@@ -349,77 +348,39 @@ static int fdfs_client_do_init_ex(TrackerServerGroup *pTrackerGroup, \
 return result;
 }
 
-load_fdfs_parameters_from_tracker = iniGetBoolValue(NULL,
-"load_fdfs_parameters_from_tracker",
+load_fdfs_parameters_from_tracker = iniGetBoolValue(NULL, \
+"load_fdfs_parameters_from_tracker", \
 iniContext, false);
 if (load_fdfs_parameters_from_tracker)
 {
-if ((result=fdfs_get_params_from_tracker(&use_storage_id)) != 0)
-{
-return result;
-}
+fdfs_get_params_from_tracker(&use_storage_id);
 }
 else
 {
-use_storage_id = iniGetBoolValue(NULL, "use_storage_id",
+use_storage_id = iniGetBoolValue(NULL, "use_storage_id", \
 iniContext, false);
 if (use_storage_id)
 {
-if ((result=fdfs_load_storage_ids_from_file(
-conf_filename, iniContext)) != 0)
-{
-return result;
-}
-}
-}
-
-if (use_storage_id)
-{
-FDFSStorageIdInfo *idInfo;
-FDFSStorageIdInfo *end;
-char *connect_first_by;
-
-end = g_storage_ids_by_id.ids + g_storage_ids_by_id.count;
-for (idInfo=g_storage_ids_by_id.ids; idInfo<end; idInfo++)
-{
-if (idInfo->ip_addrs.count > 1)
-{
-g_multi_storage_ips = true;
-break;
-}
-}
-
-if (g_multi_storage_ips)
-{
-connect_first_by = iniGetStrValue(NULL,
-"connect_first_by", iniContext);
-if (connect_first_by != NULL && strncasecmp(connect_first_by,
-"last", 4) == 0)
-{
-g_connect_first_by = fdfs_connect_first_by_last_connected;
-}
-}
-}
+result = fdfs_load_storage_ids_from_file( \
+conf_filename, iniContext);
+}
+}
 
 #ifdef DEBUG_FLAG
-logDebug("base_path=%s, "
-"connect_timeout=%d, "
-"network_timeout=%d, "
-"tracker_server_count=%d, "
-"anti_steal_token=%d, "
-"anti_steal_secret_key length=%d, "
-"use_connection_pool=%d, "
-"g_connection_pool_max_idle_time=%ds, "
-"use_storage_id=%d, connect_first_by=%s, "
-"storage server id count: %d, "
-"multi storage ips: %d\n",
-SF_G_BASE_PATH_STR, SF_G_CONNECT_TIMEOUT,
-SF_G_NETWORK_TIMEOUT, pTrackerGroup->server_count,
-g_anti_steal_token, g_anti_steal_secret_key.length,
-g_use_connection_pool, g_connection_pool_max_idle_time,
-use_storage_id, g_connect_first_by == fdfs_connect_first_by_tracker ?
-"tracker" : "last-connected", g_storage_ids_by_id.count,
-g_multi_storage_ips);
+logDebug("base_path=%s, " \
+"connect_timeout=%d, "\
+"network_timeout=%d, "\
+"tracker_server_count=%d, " \
+"anti_steal_token=%d, " \
+"anti_steal_secret_key length=%d, " \
+"use_connection_pool=%d, " \
+"g_connection_pool_max_idle_time=%ds, " \
+"use_storage_id=%d, storage server id count: %d\n", \
+g_fdfs_base_path, g_fdfs_connect_timeout, \
+g_fdfs_network_timeout, pTrackerGroup->server_count, \
+g_anti_steal_token, g_anti_steal_secret_key.length, \
+g_use_connection_pool, g_connection_pool_max_idle_time, \
+use_storage_id, g_storage_ids_by_id.count);
 #endif
 
 return 0;

@@ -13,7 +13,6 @@
 int g_tracker_server_http_port = 80;
 TrackerServerGroup g_tracker_group = {0, 0, -1, NULL};
 
-bool g_multi_storage_ips = false;
-FDFSConnectFirstBy g_connect_first_by = fdfs_connect_first_by_tracker;
 bool g_anti_steal_token = false;
 BufferInfo g_anti_steal_secret_key = {0};
 

@@ -15,11 +15,6 @@
 #include "tracker_types.h"
 #include "fdfs_shared_func.h"
 
-typedef enum {
-fdfs_connect_first_by_tracker,
-fdfs_connect_first_by_last_connected
-} FDFSConnectFirstBy;
-
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -27,8 +22,6 @@ extern "C" {
 extern int g_tracker_server_http_port;
 extern TrackerServerGroup g_tracker_group;
 
-extern bool g_multi_storage_ips;
-extern FDFSConnectFirstBy g_connect_first_by;
 extern bool g_anti_steal_token;
 extern BufferInfo g_anti_steal_secret_key;
 

@@ -48,7 +48,7 @@ int uploadFileCallback(void *arg, const int64_t file_size, int sock)
 
 filename = (char *)arg;
 return tcpsendfile(sock, filename, file_size, \
-SF_G_NETWORK_TIMEOUT, &total_send_bytes);
+g_fdfs_network_timeout, &total_send_bytes);
 }
 
 int main(int argc, char *argv[])
@@ -79,13 +79,13 @@ int main(int argc, char *argv[])
 const char *file_ext_name;
 struct stat stat_buf;
 
-printf("This is FastDFS client test program v%d.%d.%d\n" \
+printf("This is FastDFS client test program v%d.%02d\n" \
 "\nCopyright (C) 2008, Happy Fish / YuQing\n" \
 "\nFastDFS may be copied only under the terms of the GNU General\n" \
 "Public License V3, which may be found in the FastDFS source kit.\n" \
 "Please visit the FastDFS Home Page http://www.fastken.com/ \n" \
-"for more detail.\n\n", g_fdfs_version.major, g_fdfs_version.minor,
-g_fdfs_version.patch);
+"for more detail.\n\n" \
+, g_fdfs_version.major, g_fdfs_version.minor);
 
 if (argc < 3)
 {

@@ -48,7 +48,7 @@ int uploadFileCallback(void *arg, const int64_t file_size, int sock)
 
 filename = (char *)arg;
 return tcpsendfile(sock, filename, file_size, \
-SF_G_NETWORK_TIMEOUT, &total_send_bytes);
+g_fdfs_network_timeout, &total_send_bytes);
 }
 
 int main(int argc, char *argv[])
@@ -78,13 +78,13 @@ int main(int argc, char *argv[])
 const char *file_ext_name;
 struct stat stat_buf;
 
-printf("This is FastDFS client test program v%d.%d.%d\n" \
+printf("This is FastDFS client test program v%d.%02d\n" \
 "\nCopyright (C) 2008, Happy Fish / YuQing\n" \
 "\nFastDFS may be copied only under the terms of the GNU General\n" \
 "Public License V3, which may be found in the FastDFS source kit.\n" \
 "Please visit the FastDFS Home Page http://www.fastken.com/ \n" \
-"for more detail.\n\n", g_fdfs_version.major, g_fdfs_version.minor,
-g_fdfs_version.patch);
+"for more detail.\n\n" \
+, g_fdfs_version.major, g_fdfs_version.minor);
 
 if (argc < 3)
 {

@@ -35,13 +35,12 @@ static void usage(char *argv[])
 
 int main(int argc, char *argv[])
 {
-char formatted_ip[FORMATTED_IP_SIZE];
 char *conf_filename;
+int result;
 char *op_type;
-char *group_name;
 char *tracker_server;
 int arg_index;
-int result;
+char *group_name;
 
 if (argc < 2)
 {
@@ -97,7 +96,7 @@ int main(int argc, char *argv[])
 }
 
 log_init();
-//g_log_context.log_level = LOG_DEBUG;
+g_log_context.log_level = LOG_DEBUG;
 ignore_signal_pipe();
 
 if ((result=fdfs_client_init(conf_filename)) != 0)
@@ -156,9 +155,7 @@ int main(int argc, char *argv[])
 fdfs_client_destroy();
 return errno != 0 ? errno : ECONNREFUSED;
 }
-format_ip_address(pTrackerServer->ip_addr, formatted_ip);
-printf("\ntracker server is %s:%u\n\n", formatted_ip,
-pTrackerServer->port);
+printf("\ntracker server is %s:%d\n\n", pTrackerServer->ip_addr, pTrackerServer->port);
 
 if (arg_index < argc)
 {
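
Stripped of argument parsing, the test program above follows a simple init/teardown sequence. The sketch below is assembled only from calls visible in this diff (log_init, ignore_signal_pipe, fdfs_client_init, fdfs_client_destroy); the include paths are assumptions and error handling is reduced to the essentials:

```c
#include <stdio.h>
/* Header paths are assumptions; the calls themselves appear in the diff above. */
#include "fastcommon/logger.h"
#include "fdfs_client.h"

int main(int argc, char *argv[])
{
    int result;
    const char *conf_filename = argc > 1 ? argv[1] : "/etc/fdfs/client.conf";

    log_init();                          /* logging first, as the test program does */
    g_log_context.log_level = LOG_DEBUG; /* the V6.05 test program enables debug logging here */
    ignore_signal_pipe();

    /* Loads base_path, connect_timeout, network_timeout and the tracker group
     * from client.conf (see fdfs_client_do_init_ex in the hunks above). */
    if ((result = fdfs_client_init(conf_filename)) != 0)
    {
        printf("fdfs_client_init fail, errno: %d\n", result);
        return result;
    }

    /* ... issue tracker / storage requests here ... */

    fdfs_client_destroy();
    return 0;
}
```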

@@ -48,7 +48,7 @@ int uploadFileCallback(void *arg, const int64_t file_size, int sock)
 
 filename = (char *)arg;
 return tcpsendfile(sock, filename, file_size, \
-SF_G_NETWORK_TIMEOUT, &total_send_bytes);
+g_fdfs_network_timeout, &total_send_bytes);
 }
 
 int main(int argc, char *argv[])
@@ -66,7 +66,6 @@ int main(int argc, char *argv[])
 int meta_count;
 int i;
 FDFSMetaData *pMetaList;
-char formatted_ip[FORMATTED_IP_SIZE];
 char token[32 + 1];
 char file_id[128];
 char file_url[256];
@@ -74,20 +73,20 @@ int main(int argc, char *argv[])
 char szPortPart[16];
 int url_len;
 time_t ts;
 char *file_buff;
 int64_t file_size;
 char *operation;
 char *meta_buff;
 int store_path_index;
 FDFSFileInfo file_info;
 
-printf("This is FastDFS client test program v%d.%d.%d\n" \
+printf("This is FastDFS client test program v%d.%02d\n" \
 "\nCopyright (C) 2008, Happy Fish / YuQing\n" \
 "\nFastDFS may be copied only under the terms of the GNU General\n" \
 "Public License V3, which may be found in the FastDFS source kit.\n" \
 "Please visit the FastDFS Home Page http://www.fastken.com/ \n" \
-"for more detail.\n\n", g_fdfs_version.major, g_fdfs_version.minor,
-g_fdfs_version.patch);
+"for more detail.\n\n" \
+, g_fdfs_version.major, g_fdfs_version.minor);
 
 if (argc < 3)
 {
@@ -461,9 +460,8 @@ g_fdfs_version.patch);
 printf("server list (%d):\n", server_count);
 for (i=0; i<server_count; i++)
 {
-format_ip_address(storageServers[i].
-ip_addr, formatted_ip);
-printf("\t%s:%u\n", formatted_ip,
+printf("\t%s:%d\n", \
+storageServers[i].ip_addr, \
 storageServers[i].port);
 }
 printf("\n");
@@ -490,8 +488,8 @@ g_fdfs_version.patch);
 return result;
 }
 
-format_ip_address(storageServer.ip_addr, formatted_ip);
-printf("storage=%s:%u\n", formatted_ip, storageServer.port);
+printf("storage=%s:%d\n", storageServer.ip_addr, \
+storageServer.port);
 
 if ((pStorageServer=tracker_make_connection(&storageServer, \
 &result)) == NULL)
@@ -672,17 +670,15 @@ g_fdfs_version.patch);
 /* for test only */
 if ((result=fdfs_active_test(pTrackerServer)) != 0)
 {
-format_ip_address(pTrackerServer->ip_addr, formatted_ip);
-printf("active_test to tracker server %s:%u fail, errno: %d\n",
-formatted_ip, pTrackerServer->port, result);
+printf("active_test to tracker server %s:%d fail, errno: %d\n", \
+pTrackerServer->ip_addr, pTrackerServer->port, result);
 }
 
 /* for test only */
 if ((result=fdfs_active_test(pStorageServer)) != 0)
 {
-format_ip_address(pStorageServer->ip_addr, formatted_ip);
-printf("active_test to storage server %s:%u fail, errno: %d\n",
-formatted_ip, pStorageServer->port, result);
+printf("active_test to storage server %s:%d fail, errno: %d\n", \
+pStorageServer->ip_addr, pStorageServer->port, result);
 }
 
 tracker_close_connection_ex(pStorageServer, true);
@@ -692,3 +688,4 @@ g_fdfs_version.patch);
 
 return result;
 }
+

@@ -47,7 +47,7 @@ int uploadFileCallback(void *arg, const int64_t file_size, int sock)
 
 filename = (char *)arg;
 return tcpsendfile(sock, filename, file_size, \
-SF_G_NETWORK_TIMEOUT, &total_send_bytes);
+g_fdfs_network_timeout, &total_send_bytes);
 }
 
 int main(int argc, char *argv[])
@@ -63,7 +63,6 @@ int main(int argc, char *argv[])
 int meta_count;
 int i;
 FDFSMetaData *pMetaList;
-char formatted_ip[FORMATTED_IP_SIZE];
 char token[32 + 1];
 char file_id[128];
 char master_file_id[128];
@@ -72,20 +71,20 @@ int main(int argc, char *argv[])
 char szPortPart[16];
 int url_len;
 time_t ts;
 char *file_buff;
 int64_t file_size;
 char *operation;
 char *meta_buff;
 int store_path_index;
 FDFSFileInfo file_info;
 
-printf("This is FastDFS client test program v%d.%d.%d\n" \
+printf("This is FastDFS client test program v%d.%02d\n" \
 "\nCopyright (C) 2008, Happy Fish / YuQing\n" \
 "\nFastDFS may be copied only under the terms of the GNU General\n" \
 "Public License V3, which may be found in the FastDFS source kit.\n" \
 "Please visit the FastDFS Home Page http://www.fastken.com/ \n" \
-"for more detail.\n\n", g_fdfs_version.major, g_fdfs_version.minor,
-g_fdfs_version.patch);
+"for more detail.\n\n" \
+, g_fdfs_version.major, g_fdfs_version.minor);
 
 if (argc < 3)
 {
@@ -433,9 +432,8 @@ g_fdfs_version.patch);
 printf("server list (%d):\n", server_count);
 for (i=0; i<server_count; i++)
 {
-format_ip_address(storageServers[i].
-ip_addr, formatted_ip);
-printf("\t%s:%u\n", formatted_ip,
+printf("\t%s:%d\n", \
+storageServers[i].ip_addr, \
 storageServers[i].port);
 }
 printf("\n");
@@ -457,8 +455,8 @@ g_fdfs_version.patch);
 return result;
 }
 
-format_ip_address(storageServer.ip_addr, formatted_ip);
-printf("storage=%s:%u\n", formatted_ip, storageServer.port);
+printf("storage=%s:%d\n", storageServer.ip_addr, \
+storageServer.port);
 
 if ((pStorageServer=tracker_make_connection(&storageServer, \
 &result)) == NULL)
@@ -639,17 +637,15 @@ g_fdfs_version.patch);
 /* for test only */
 if ((result=fdfs_active_test(pTrackerServer)) != 0)
 {
-format_ip_address(pTrackerServer->ip_addr, formatted_ip);
-printf("active_test to tracker server %s:%u fail, errno: %d\n",
-formatted_ip, pTrackerServer->port, result);
+printf("active_test to tracker server %s:%d fail, errno: %d\n", \
+pTrackerServer->ip_addr, pTrackerServer->port, result);
 }
 
 /* for test only */
 if ((result=fdfs_active_test(pStorageServer)) != 0)
 {
-format_ip_address(pStorageServer->ip_addr, formatted_ip);
-printf("active_test to storage server %s:%u fail, errno: %d\n",
-formatted_ip, pStorageServer->port, result);
+printf("active_test to storage server %s:%d fail, errno: %d\n", \
+pStorageServer->ip_addr, pStorageServer->port, result);
 }
 
 tracker_close_connection_ex(pStorageServer, true);

@@ -31,7 +31,8 @@
 #include "client_global.h"
 #include "fastcommon/base64.h"
 
-static int g_base64_context_inited = 0;
+static struct base64_context the_base64_context;
+static int the_base64_context_inited = 0;
 
 #define FDFS_SPLIT_GROUP_NAME_AND_FILENAME(file_id) \
 char in_file_id[FDFS_GROUP_NAME_MAX_LEN + 128]; \
@@ -64,103 +65,6 @@ static int g_base64_context_inited = 0;
 ppStorageServer, TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE, \
 group_name, filename, pNewStorage, new_connection)
 
-static ConnectionInfo *make_connection_by_tracker(
-ConnectionInfo *pStorageServer, int *err_no)
-{
-ConnectionInfo *conn;
-FDFSStorageIdInfo *idInfo;
-
-if ((conn=tracker_make_connection(pStorageServer, err_no)) != NULL)
-{
-return conn;
-}
-
-if (!g_multi_storage_ips)
-{
-return NULL;
-}
-
-if ((idInfo=fdfs_get_storage_id_by_ip_port(pStorageServer->ip_addr,
-pStorageServer->port)) == NULL)
-{
-return NULL;
-}
-
-if (idInfo->ip_addrs.count < 2)
-{
-return NULL;
-}
-
-if (strcmp(pStorageServer->ip_addr, idInfo->ip_addrs.ips[0].address) == 0)
-{
-strcpy(pStorageServer->ip_addr, idInfo->ip_addrs.ips[1].address);
-}
-else
-{
-strcpy(pStorageServer->ip_addr, idInfo->ip_addrs.ips[0].address);
-}
-return tracker_make_connection(pStorageServer, err_no);
-}
-
-static ConnectionInfo *make_connection_by_last_connected(
-ConnectionInfo *pStorageServer, int *err_no)
-{
-ConnectionInfo *conn;
-FDFSStorageIdInfo *idInfo;
-int index;
-
-if (!g_multi_storage_ips)
-{
-return tracker_make_connection(pStorageServer, err_no);
-}
-
-if ((idInfo=fdfs_get_storage_id_by_ip_port(pStorageServer->ip_addr,
-pStorageServer->port)) == NULL)
-{
-return tracker_make_connection(pStorageServer, err_no);
-}
-if (idInfo->ip_addrs.count < 2)
-{
-return tracker_make_connection(pStorageServer, err_no);
-}
-
-index = idInfo->ip_addrs.index;
-if (strcmp(pStorageServer->ip_addr, idInfo->ip_addrs.
-ips[index].address) != 0)
-{
-strcpy(pStorageServer->ip_addr, idInfo->ip_addrs.
-ips[index].address);
-}
-if ((conn=tracker_make_connection(pStorageServer, err_no)) != NULL)
-{
-return conn;
-}
-
-if (++index == idInfo->ip_addrs.count)
-{
-index = 0;
-}
-strcpy(pStorageServer->ip_addr, idInfo->ip_addrs.ips[index].address);
-if ((conn=tracker_make_connection(pStorageServer, err_no)) != NULL)
-{
-idInfo->ip_addrs.index = index;
-}
-return conn;
-}
-
-static inline ConnectionInfo *storage_make_connection(
-ConnectionInfo *pStorageServer, int *err_no)
-{
-if (g_connect_first_by == fdfs_connect_first_by_tracker)
-{
-return make_connection_by_tracker(pStorageServer, err_no);
-}
-else
-{
-return make_connection_by_last_connected(pStorageServer, err_no);
-}
-}
-
 static int storage_get_connection(ConnectionInfo *pTrackerServer, \
 ConnectionInfo **ppStorageServer, const byte cmd, \
 const char *group_name, const char *filename, \
@@ -194,7 +98,7 @@ static int storage_get_connection(ConnectionInfo *pTrackerServer, \
 return result;
 }
 
-if ((*ppStorageServer=storage_make_connection(pNewStorage,
+if ((*ppStorageServer=tracker_make_connection(pNewStorage,
 &result)) == NULL)
 {
 return result;
@@ -210,7 +114,7 @@ static int storage_get_connection(ConnectionInfo *pTrackerServer, \
 }
 else
 {
-if ((*ppStorageServer=storage_make_connection(
+if ((*ppStorageServer=tracker_make_connection(
 *ppStorageServer, &result)) == NULL)
 {
 return result;
@@ -259,7 +163,7 @@ static int storage_get_upload_connection(ConnectionInfo *pTrackerServer, \
 return result;
 }
 
-if ((*ppStorageServer=storage_make_connection(pNewStorage,
+if ((*ppStorageServer=tracker_make_connection(pNewStorage,
 &result)) == NULL)
 {
 return result;
@@ -275,7 +179,7 @@ static int storage_get_upload_connection(ConnectionInfo *pTrackerServer, \
 }
 else
 {
-if ((*ppStorageServer=storage_make_connection(
+if ((*ppStorageServer=tracker_make_connection(
 *ppStorageServer, &result)) == NULL)
 {
 return result;
@@ -309,7 +213,6 @@ int storage_get_metadata(ConnectionInfo *pTrackerServer, \
 int result;
 ConnectionInfo storageServer;
 char out_buff[sizeof(TrackerHeader)+FDFS_GROUP_NAME_MAX_LEN+128];
-char formatted_ip[FORMATTED_IP_SIZE];
 int64_t in_bytes;
 int filename_len;
 char *file_buff;
@@ -347,15 +250,15 @@ int storage_get_metadata(ConnectionInfo *pTrackerServer, \
 long2buff(FDFS_GROUP_NAME_MAX_LEN + filename_len, pHeader->pkg_len);
 pHeader->cmd = STORAGE_PROTO_CMD_GET_METADATA;
 
-if ((result=tcpsenddata_nb(pStorageServer->sock, out_buff,
-sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN +
-filename_len, SF_G_NETWORK_TIMEOUT)) != 0)
+if ((result=tcpsenddata_nb(pStorageServer->sock, out_buff, \
+sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN + \
+filename_len, g_fdfs_network_timeout)) != 0)
 {
-format_ip_address(pTrackerServer->ip_addr, formatted_ip);
-logError("file: "__FILE__", line: %d, "
-"send data to storage server %s:%u fail, "
-"errno: %d, error info: %s", __LINE__, formatted_ip,
-pStorageServer->port, result, STRERROR(result));
+logError("file: "__FILE__", line: %d, " \
+"send data to storage server %s:%d fail, " \
+"errno: %d, error info: %s", __LINE__, \
+pStorageServer->ip_addr, pStorageServer->port, \
+result, STRERROR(result));
 
 break;
 }
@@ -411,7 +314,6 @@ int storage_query_file_info_ex(ConnectionInfo *pTrackerServer, \
 ConnectionInfo storageServer;
 char out_buff[sizeof(TrackerHeader)+FDFS_GROUP_NAME_MAX_LEN+128];
 char in_buff[3 * FDFS_PROTO_PKG_LEN_SIZE + IP_ADDRESS_SIZE];
-char formatted_ip[FORMATTED_IP_SIZE];
 char buff[64];
 int64_t in_bytes;
 int filename_len;
@@ -448,15 +350,15 @@ int storage_query_file_info_ex(ConnectionInfo *pTrackerServer, \
 pHeader->cmd = STORAGE_PROTO_CMD_QUERY_FILE_INFO;
 pHeader->status = bSilence ? ENOENT : 0;
 
-if ((result=tcpsenddata_nb(pStorageServer->sock, out_buff,
-sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN +
-filename_len, SF_G_NETWORK_TIMEOUT)) != 0)
+if ((result=tcpsenddata_nb(pStorageServer->sock, out_buff, \
+sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN + \
+filename_len, g_fdfs_network_timeout)) != 0)
 {
-format_ip_address(pStorageServer->ip_addr, formatted_ip);
-logError("file: "__FILE__", line: %d, "
-"send data to storage server %s:%u fail, "
-"errno: %d, error info: %s", __LINE__, formatted_ip,
-pStorageServer->port, result, STRERROR(result));
+logError("file: "__FILE__", line: %d, " \
+"send data to storage server %s:%d fail, " \
+"errno: %d, error info: %s", __LINE__, \
+pStorageServer->ip_addr, pStorageServer->port, \
+result, STRERROR(result));
 
 break;
 }
@@ -473,25 +375,25 @@ int storage_query_file_info_ex(ConnectionInfo *pTrackerServer, \
 
 if (in_bytes != sizeof(in_buff))
 {
-format_ip_address(pStorageServer->ip_addr, formatted_ip);
-logError("file: "__FILE__", line: %d, "
-"recv data from storage server %s:%u fail, "
-"recv bytes: %"PRId64" != %d", __LINE__, formatted_ip,
-pStorageServer->port, in_bytes, (int)sizeof(in_buff));
+logError("file: "__FILE__", line: %d, " \
+"recv data from storage server %s:%d fail, " \
+"recv bytes: %"PRId64" != %d", __LINE__, \
+pStorageServer->ip_addr, pStorageServer->port, \
+in_bytes, (int)sizeof(in_buff));
 result = EINVAL;
 }
 
-if (!g_base64_context_inited)
+if (!the_base64_context_inited)
 {
-g_base64_context_inited = 1;
-base64_init_ex(&g_fdfs_base64_context, 0, '-', '_', '.');
+the_base64_context_inited = 1;
+base64_init_ex(&the_base64_context, 0, '-', '_', '.');
 }
 
 memset(buff, 0, sizeof(buff));
 if (filename_len >= FDFS_LOGIC_FILE_PATH_LEN \
 + FDFS_FILENAME_BASE64_LENGTH + FDFS_FILE_EXT_NAME_MAX_LEN + 1)
 {
-base64_decode_auto(&g_fdfs_base64_context, (char *)filename + \
+base64_decode_auto(&the_base64_context, (char *)filename + \
 FDFS_LOGIC_FILE_PATH_LEN, FDFS_FILENAME_BASE64_LENGTH, \
 buff, &buff_len);
 }
@@ -545,7 +447,6 @@ int storage_delete_file(ConnectionInfo *pTrackerServer, \
 int result;
 ConnectionInfo storageServer;
 char out_buff[sizeof(TrackerHeader)+FDFS_GROUP_NAME_MAX_LEN+128];
-char formatted_ip[FORMATTED_IP_SIZE];
 char in_buff[1];
 char *pBuff;
 int64_t in_bytes;
@@ -579,15 +480,15 @@ int storage_delete_file(ConnectionInfo *pTrackerServer, \
 long2buff(FDFS_GROUP_NAME_MAX_LEN + filename_len, pHeader->pkg_len);
 pHeader->cmd = STORAGE_PROTO_CMD_DELETE_FILE;
 
-if ((result=tcpsenddata_nb(pStorageServer->sock, out_buff,
-sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN +
-filename_len, SF_G_NETWORK_TIMEOUT)) != 0)
+if ((result=tcpsenddata_nb(pStorageServer->sock, out_buff, \
+sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN + \
+filename_len, g_fdfs_network_timeout)) != 0)
 {
-format_ip_address(pStorageServer->ip_addr, formatted_ip);
-logError("file: "__FILE__", line: %d, "
-"send data to storage server %s:%u fail, "
-"errno: %d, error info: %s", __LINE__, formatted_ip,
-pStorageServer->port, result, STRERROR(result));
+logError("file: "__FILE__", line: %d, " \
+"send data to storage server %s:%d fail, " \
+"errno: %d, error info: %s", __LINE__, \
+pStorageServer->ip_addr, pStorageServer->port, \
+result, STRERROR(result));
 break;
 }
 
@@ -635,7 +536,6 @@ int storage_do_download_file_ex(ConnectionInfo *pTrackerServer, \
 int result;
 ConnectionInfo storageServer;
 char out_buff[sizeof(TrackerHeader)+FDFS_GROUP_NAME_MAX_LEN+128];
-char formatted_ip[FORMATTED_IP_SIZE];
 char *p;
 int out_bytes;
 int64_t in_bytes;
@@ -670,21 +570,21 @@ int storage_do_download_file_ex(ConnectionInfo *pTrackerServer, \
 p += 8;
 snprintf(p, sizeof(out_buff) - (p - out_buff), "%s", group_name);
 p += FDFS_GROUP_NAME_MAX_LEN;
-filename_len = snprintf(p, sizeof(out_buff) -
-(p - out_buff), "%s", remote_filename);
+filename_len = snprintf(p, sizeof(out_buff) - (p - out_buff), \
+"%s", remote_filename);
 p += filename_len;
 out_bytes = p - out_buff;
 long2buff(out_bytes - sizeof(TrackerHeader), pHeader->pkg_len);
 pHeader->cmd = STORAGE_PROTO_CMD_DOWNLOAD_FILE;
 
-if ((result=tcpsenddata_nb(pStorageServer->sock, out_buff,
-out_bytes, SF_G_NETWORK_TIMEOUT)) != 0)
+if ((result=tcpsenddata_nb(pStorageServer->sock, out_buff, \
+out_bytes, g_fdfs_network_timeout)) != 0)
 {
-format_ip_address(pStorageServer->ip_addr, formatted_ip);
-logError("file: "__FILE__", line: %d, "
-"send data to storage server %s:%u fail, "
-"errno: %d, error info: %s", __LINE__, formatted_ip,
-pStorageServer->port, result, STRERROR(result));
+logError("file: "__FILE__", line: %d, " \
+"send data to storage server %s:%d fail, " \
+"errno: %d, error info: %s", __LINE__, \
+pStorageServer->ip_addr, pStorageServer->port, \
+result, STRERROR(result));
 break;
 }
 
@@ -701,7 +601,7 @@ int storage_do_download_file_ex(ConnectionInfo *pTrackerServer, \
 
 if ((result=tcprecvfile(pStorageServer->sock, \
 *file_buff, in_bytes, 0, \
-SF_G_NETWORK_TIMEOUT, \
+g_fdfs_network_timeout, \
 &total_recv_bytes)) != 0)
 {
 break;
@@ -748,14 +648,15 @@ int storage_do_download_file_ex(ConnectionInfo *pTrackerServer, \
 recv_bytes = remain_bytes;
 }
 
-if ((result=tcprecvdata_nb(pStorageServer->sock, buff,
-recv_bytes, SF_G_NETWORK_TIMEOUT)) != 0)
+if ((result=tcprecvdata_nb(pStorageServer->sock, buff, \
+recv_bytes, g_fdfs_network_timeout)) != 0)
 {
-format_ip_address(pStorageServer->ip_addr, formatted_ip);
-logError("file: "__FILE__", line: %d, "
-"recv data from storage server %s:%u fail, "
-"errno: %d, error info: %s", __LINE__,
-formatted_ip, pStorageServer->port,
+logError("file: "__FILE__", line: %d, " \
+"recv data from storage server " \
+"%s:%d fail, " \
+"errno: %d, error info: %s", __LINE__, \
+pStorageServer->ip_addr, \
+pStorageServer->port, \
 result, STRERROR(result));
 break;
 }
@@ -893,16 +794,22 @@ int storage_do_upload_file1(ConnectionInfo *pTrackerServer, \
 STORAGE_PROTO_CMD_UPLOAD_FILE and
 STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE:
 1 byte: store path index
+8 bytes: meta data bytes
 8 bytes: file size
 FDFS_FILE_EXT_NAME_MAX_LEN bytes: file ext name
+meta data bytes: each meta data seperated by \x01,
+name and value seperated by \x02
 file size bytes: file content
 
 STORAGE_PROTO_CMD_UPLOAD_SLAVE_FILE:
 8 bytes: master filename length
+8 bytes: meta data bytes
 8 bytes: file size
 FDFS_FILE_PREFIX_MAX_LEN bytes : filename prefix
 FDFS_FILE_EXT_NAME_MAX_LEN bytes: file ext name, do not include dot (.)
 master filename bytes: master filename
+meta data bytes: each meta data seperated by \x01,
+name and value seperated by \x02
 file size bytes: file content
 **/
 int storage_do_upload_file(ConnectionInfo *pTrackerServer, \
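
The comment above pins down the upload request body byte-for-byte. As a worked illustration, here is a minimal sketch of packing the V6.05 STORAGE_PROTO_CMD_UPLOAD_FILE body in that order: store path index, meta data length, file size, fixed-width ext name, then the meta data and the file content. EXT_NAME_LEN and pack_int64 are illustrative stand-ins rather than the FastDFS definitions, and the sketch omits the leading TrackerHeader that the real client fills in (its pkg_len is written with long2buff, as seen elsewhere in this file):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for FDFS_FILE_EXT_NAME_MAX_LEN. */
#define EXT_NAME_LEN 6

/* Store a 64-bit value as 8 big-endian bytes, the layout the comment above
 * calls "8 bytes". */
static void pack_int64(int64_t v, unsigned char *buf)
{
    int i;
    for (i = 7; i >= 0; i--)
    {
        buf[i] = (unsigned char)(v & 0xFF);
        v >>= 8;
    }
}

/* V6.05 upload body: 1 byte store path index, 8 bytes meta data length,
 * 8 bytes file size, EXT_NAME_LEN bytes ext name (zero padded), then the
 * meta data ("name\x02value" pairs joined by \x01) and the file content. */
static size_t pack_upload_body(unsigned char *out, int store_path_index,
        const char *meta, int64_t meta_len, const char *ext_name,
        const char *file_buff, int64_t file_size)
{
    unsigned char *p = out;
    size_t ext_len = strlen(ext_name);

    *p++ = (unsigned char)store_path_index;
    pack_int64(meta_len, p);
    p += 8;
    pack_int64(file_size, p);
    p += 8;
    memset(p, 0, EXT_NAME_LEN);
    memcpy(p, ext_name, ext_len < EXT_NAME_LEN ? ext_len : EXT_NAME_LEN);
    p += EXT_NAME_LEN;
    memcpy(p, meta, (size_t)meta_len);
    p += meta_len;
    memcpy(p, file_buff, (size_t)file_size);
    p += file_size;
    return (size_t)(p - out);
}

int main(void)
{
    unsigned char body[256];
    const char *meta = "width\x02" "1024\x01" "height\x02" "768";
    const char *data = "hello";
    size_t len = pack_upload_body(body, 0, meta, (int64_t)strlen(meta),
            "txt", data, (int64_t)strlen(data));

    printf("packed %u body bytes\n", (unsigned)len);
    return 0;
}
```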
@ -920,10 +827,9 @@ int storage_do_upload_file(ConnectionInfo *pTrackerServer, \
|
||||||
int64_t in_bytes;
|
int64_t in_bytes;
|
||||||
int64_t total_send_bytes;
|
int64_t total_send_bytes;
|
||||||
char in_buff[128];
|
char in_buff[128];
|
||||||
char formatted_ip[FORMATTED_IP_SIZE];
|
|
||||||
char *pInBuff;
|
char *pInBuff;
|
||||||
ConnectionInfo storageServer;
|
ConnectionInfo storageServer;
|
||||||
bool new_connection = false;
|
bool new_connection;
|
||||||
bool bUploadSlave;
|
bool bUploadSlave;
|
||||||
int new_store_path;
|
int new_store_path;
|
||||||
int master_filename_len;
|
int master_filename_len;
|
||||||
|
|
@ -952,30 +858,26 @@ int storage_do_upload_file(ConnectionInfo *pTrackerServer, \
|
||||||
  bUploadSlave = (strlen(group_name) > 0 && master_filename_len > 0);
  if (bUploadSlave)
  {
- if ((result=storage_get_update_connection(pTrackerServer,
- &pStorageServer, group_name, master_filename,
+ if ((result=storage_get_update_connection(pTrackerServer, \
+ &pStorageServer, group_name, master_filename, \
  &storageServer, &new_connection)) != 0)
  {
  return result;
  }
  }
- else
- {
- if ((result=storage_get_upload_connection(pTrackerServer,
- &pStorageServer, group_name, &storageServer,
- &new_store_path, &new_connection)) != 0)
- {
- *group_name = '\0';
- return result;
- }
- }
+ else if ((result=storage_get_upload_connection(pTrackerServer, \
+ &pStorageServer, group_name, &storageServer, \
+ &new_store_path, &new_connection)) != 0)
+ {
+ *group_name = '\0';
+ return result;
+ }

  *group_name = '\0';
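For orientation, this is how the function being edited here is reached from a client program: a tracker hands out a storage server and store path index, and the upload call then drives storage_do_upload_file(). The sketch below condenses the test program that appears later in this same compare view (fdfs_test.c), using only calls visible in this diff (fdfs_client_init, tracker_get_connection, tracker_query_storage_store, tracker_make_connection, fdfs_get_file_ext_name, storage_upload_by_filename). It drops most error reporting and passes an empty meta data list, so treat it as a minimal usage sketch rather than the canonical client.

#include <stdio.h>
#include <errno.h>
#include "fdfs_client.h"

int main(int argc, char *argv[])
{
    ConnectionInfo *pTrackerServer;
    ConnectionInfo *pStorageServer;
    ConnectionInfo storageServer;
    char group_name[FDFS_GROUP_NAME_MAX_LEN + 1];
    char remote_filename[256];
    int store_path_index;
    int result;

    if (argc < 3)
    {
        printf("Usage: %s <config_file> <local_filename>\n", argv[0]);
        return 1;
    }

    log_init();
    if ((result=fdfs_client_init(argv[1])) != 0)
    {
        return result;
    }

    /* 1. ask a tracker which storage server / store path to write to */
    pTrackerServer = tracker_get_connection();
    if (pTrackerServer == NULL)
    {
        fdfs_client_destroy();
        return errno != 0 ? errno : ECONNREFUSED;
    }

    *group_name = '\0';
    if ((result=tracker_query_storage_store(pTrackerServer, &storageServer,
            group_name, &store_path_index)) != 0)
    {
        fdfs_client_destroy();
        return result;
    }

    /* 2. connect to that storage server and upload the local file */
    if ((pStorageServer=tracker_make_connection(&storageServer, &result)) != NULL)
    {
        result = storage_upload_by_filename(pTrackerServer, pStorageServer,
                store_path_index, argv[2],
                fdfs_get_file_ext_name(argv[2]), NULL, 0,
                group_name, remote_filename);
        if (result == 0)
        {
            printf("%s/%s\n", group_name, remote_filename);
        }
        tracker_close_connection_ex(pStorageServer, true);
    }

    fdfs_client_destroy();
    return result;
}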
  /*
- format_ip_address(pStorageServer->ip_addr, formatted_ip);
- //logInfo("upload to storage %s:%u\n", \
- formatted_ip, pStorageServer->port);
+ //logInfo("upload to storage %s:%d\n", \
+ pStorageServer->ip_addr, pStorageServer->port);
  */

  do

@@ -1041,21 +943,21 @@ int storage_do_upload_file(ConnectionInfo *pTrackerServer, \
  pHeader->cmd = cmd;
  pHeader->status = 0;

- if ((result=tcpsenddata_nb(pStorageServer->sock, out_buff,
- p - out_buff, SF_G_NETWORK_TIMEOUT)) != 0)
+ if ((result=tcpsenddata_nb(pStorageServer->sock, out_buff, \
+ p - out_buff, g_fdfs_network_timeout)) != 0)
  {
- format_ip_address(pStorageServer->ip_addr, formatted_ip);
- logError("file: "__FILE__", line: %d, "
- "send data to storage server %s:%u fail, "
- "errno: %d, error info: %s", __LINE__, formatted_ip,
- pStorageServer->port, result, STRERROR(result));
+ logError("file: "__FILE__", line: %d, " \
+ "send data to storage server %s:%d fail, " \
+ "errno: %d, error info: %s", __LINE__, \
+ pStorageServer->ip_addr, pStorageServer->port, \
+ result, STRERROR(result));
  break;
  }

  if (upload_type == FDFS_UPLOAD_BY_FILE)
  {
  if ((result=tcpsendfile(pStorageServer->sock, file_buff, \
- file_size, SF_G_NETWORK_TIMEOUT, \
+ file_size, g_fdfs_network_timeout, \
  &total_send_bytes)) != 0)
  {
  break;

@@ -1063,15 +965,15 @@ int storage_do_upload_file(ConnectionInfo *pTrackerServer, \
  }
  else if (upload_type == FDFS_UPLOAD_BY_BUFF)
  {
- if ((result=tcpsenddata_nb(pStorageServer->sock,
- (char *)file_buff, file_size,
- SF_G_NETWORK_TIMEOUT)) != 0)
+ if ((result=tcpsenddata_nb(pStorageServer->sock, \
+ (char *)file_buff, file_size, \
+ g_fdfs_network_timeout)) != 0)
  {
- format_ip_address(pStorageServer->ip_addr, formatted_ip);
- logError("file: "__FILE__", line: %d, "
- "send data to storage server %s:%u fail, "
- "errno: %d, error info: %s", __LINE__, formatted_ip,
- pStorageServer->port, result, STRERROR(result));
+ logError("file: "__FILE__", line: %d, " \
+ "send data to storage server %s:%d fail, " \
+ "errno: %d, error info: %s", __LINE__, \
+ pStorageServer->ip_addr, pStorageServer->port, \
+ result, STRERROR(result));
  break;
  }
  }

@@ -1097,11 +999,12 @@ int storage_do_upload_file(ConnectionInfo *pTrackerServer, \

  if (in_bytes <= FDFS_GROUP_NAME_MAX_LEN)
  {
- format_ip_address(pStorageServer->ip_addr, formatted_ip);
- logError("file: "__FILE__", line: %d, "
- "storage server %s:%u response data length: %"PRId64" "
- "is invalid, should > %d", __LINE__, formatted_ip,
- pStorageServer->port, in_bytes, FDFS_GROUP_NAME_MAX_LEN);
+ logError("file: "__FILE__", line: %d, " \
+ "storage server %s:%d response data " \
+ "length: %"PRId64" is invalid, " \
+ "should > %d", __LINE__, \
+ pStorageServer->ip_addr, pStorageServer->port, \
+ in_bytes, FDFS_GROUP_NAME_MAX_LEN);
  result = EINVAL;
  break;
  }
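The length check above exists because a successful upload reply carries a fixed-width group name followed by the remote filename, so anything not longer than FDFS_GROUP_NAME_MAX_LEN cannot be a valid reply. The snippet below is an illustrative sketch of that split only, not the client's actual parsing code; GROUP_NAME_LEN and the sample buffer contents are stand-ins.

#include <stdio.h>
#include <string.h>

#define GROUP_NAME_LEN 16   /* stand-in for FDFS_GROUP_NAME_MAX_LEN */

static int split_upload_response(const char *in_buff, int in_bytes,
        char *group_name, char *remote_filename)
{
    if (in_bytes <= GROUP_NAME_LEN)
    {
        return -1;  /* mirrors the "should > FDFS_GROUP_NAME_MAX_LEN" check above */
    }

    memcpy(group_name, in_buff, GROUP_NAME_LEN);      /* fixed-width group name */
    group_name[GROUP_NAME_LEN] = '\0';

    memcpy(remote_filename, in_buff + GROUP_NAME_LEN, /* rest is the filename */
            in_bytes - GROUP_NAME_LEN);
    remote_filename[in_bytes - GROUP_NAME_LEN] = '\0';
    return 0;
}

int main(void)
{
    /* fake reply: 16 bytes of group name (NUL padded), then the filename */
    char buff[64];
    char group_name[GROUP_NAME_LEN + 1];
    char remote_filename[48];

    memset(buff, 0, sizeof(buff));
    memcpy(buff, "group1", 6);
    strcpy(buff + GROUP_NAME_LEN, "M00/00/00/xxx.jpg");

    if (split_upload_response(buff, GROUP_NAME_LEN + 17,
            group_name, remote_filename) == 0)
    {
        printf("group: %s, filename: %s\n", group_name, remote_filename);
    }
    return 0;
}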
@@ -1261,7 +1164,6 @@ int storage_set_metadata(ConnectionInfo *pTrackerServer, \
  ConnectionInfo storageServer;
  char out_buff[sizeof(TrackerHeader)+2*FDFS_PROTO_PKG_LEN_SIZE+\
  FDFS_GROUP_NAME_MAX_LEN+128];
- char formatted_ip[FORMATTED_IP_SIZE];
  char in_buff[1];
  int64_t in_bytes;
  char *pBuff;

@@ -1272,8 +1174,8 @@ int storage_set_metadata(ConnectionInfo *pTrackerServer, \
  char *pEnd;
  bool new_connection;

- if ((result=storage_get_update_connection(pTrackerServer,
- &pStorageServer, group_name, filename,
+ if ((result=storage_get_update_connection(pTrackerServer, \
+ &pStorageServer, group_name, filename, \
  &storageServer, &new_connection)) != 0)
  {
  return result;
@@ -1318,29 +1220,30 @@ int storage_set_metadata(ConnectionInfo *pTrackerServer, \
  p += filename_len;

  pHeader = (TrackerHeader *)out_buff;
- long2buff((int)(p - (out_buff + sizeof(TrackerHeader))) +
+ long2buff((int)(p - (out_buff + sizeof(TrackerHeader))) + \
  meta_bytes, pHeader->pkg_len);
  pHeader->cmd = STORAGE_PROTO_CMD_SET_METADATA;

- if ((result=tcpsenddata_nb(pStorageServer->sock, out_buff,
- p - out_buff, SF_G_NETWORK_TIMEOUT)) != 0)
+ if ((result=tcpsenddata_nb(pStorageServer->sock, out_buff, \
+ p - out_buff, g_fdfs_network_timeout)) != 0)
  {
- format_ip_address(pStorageServer->ip_addr, formatted_ip);
- logError("file: "__FILE__", line: %d, "
- "send data to storage server %s:%u fail, "
- "errno: %d, error info: %s", __LINE__, formatted_ip,
- pStorageServer->port, result, STRERROR(result));
+ logError("file: "__FILE__", line: %d, " \
+ "send data to storage server %s:%d fail, " \
+ "errno: %d, error info: %s", __LINE__, \
+ pStorageServer->ip_addr, pStorageServer->port, \
+ result, STRERROR(result));

  break;
  }

- if (meta_bytes > 0 && (result=tcpsenddata_nb(pStorageServer->sock,
- meta_buff, meta_bytes, SF_G_NETWORK_TIMEOUT)) != 0)
+ if (meta_bytes > 0 && (result=tcpsenddata_nb(pStorageServer->sock, \
+ meta_buff, meta_bytes, g_fdfs_network_timeout)) != 0)
  {
- format_ip_address(pStorageServer->ip_addr, formatted_ip);
- logError("file: "__FILE__", line: %d, "
- "send data to storage server %s:%u fail, "
- "errno: %d, error info: %s", __LINE__, formatted_ip,
- pStorageServer->port, result, STRERROR(result));
+ logError("file: "__FILE__", line: %d, " \
+ "send data to storage server %s:%d fail, " \
+ "errno: %d, error info: %s", __LINE__, \
+ pStorageServer->ip_addr, pStorageServer->port, \
+ result, STRERROR(result));
  break;
  }

@@ -1441,7 +1344,6 @@ int storage_client_create_link(ConnectionInfo *pTrackerServer, \
  FDFS_GROUP_NAME_MAX_LEN + FDFS_FILE_PREFIX_MAX_LEN + \
  FDFS_FILE_EXT_NAME_MAX_LEN + 256];
  char in_buff[128];
- char formatted_ip[FORMATTED_IP_SIZE];
  char *p;
  int group_name_len;
  int master_filename_len;

@@ -1537,13 +1439,13 @@ int storage_client_create_link(ConnectionInfo *pTrackerServer, \
  long2buff(p - out_buff - sizeof(TrackerHeader), pHeader->pkg_len);
  pHeader->cmd = STORAGE_PROTO_CMD_CREATE_LINK;
  if ((result=tcpsenddata_nb(pStorageServer->sock, out_buff, \
- p - out_buff, SF_G_NETWORK_TIMEOUT)) != 0)
+ p - out_buff, g_fdfs_network_timeout)) != 0)
  {
- format_ip_address(pStorageServer->ip_addr, formatted_ip);
- logError("file: "__FILE__", line: %d, "
- "send data to storage server %s:%u fail, "
- "errno: %d, error info: %s", __LINE__, formatted_ip,
- pStorageServer->port, result, STRERROR(result));
+ logError("file: "__FILE__", line: %d, " \
+ "send data to storage server %s:%d fail, " \
+ "errno: %d, error info: %s", __LINE__, \
+ pStorageServer->ip_addr, pStorageServer->port, \
+ result, STRERROR(result));
  break;
  }

@@ -1559,11 +1461,12 @@ int storage_client_create_link(ConnectionInfo *pTrackerServer, \

  if (in_bytes <= FDFS_GROUP_NAME_MAX_LEN)
  {
- format_ip_address(pStorageServer->ip_addr, formatted_ip);
- logError("file: "__FILE__", line: %d, "
- "storage server %s:%u response data length: %"PRId64" "
- "is invalid, should > %d", __LINE__, formatted_ip,
- pStorageServer->port, in_bytes, FDFS_GROUP_NAME_MAX_LEN);
+ logError("file: "__FILE__", line: %d, " \
+ "storage server %s:%d response data " \
+ "length: %"PRId64" is invalid, " \
+ "should > %d", __LINE__, \
+ pStorageServer->ip_addr, pStorageServer->port, \
+ in_bytes, FDFS_GROUP_NAME_MAX_LEN);
  result = EINVAL;
  break;
  }
@@ -1792,7 +1695,6 @@ int storage_do_append_file(ConnectionInfo *pTrackerServer, \
  TrackerHeader *pHeader;
  int result;
  char out_buff[512];
- char formatted_ip[FORMATTED_IP_SIZE];
  char *p;
  int64_t in_bytes;
  int64_t total_send_bytes;

@@ -1810,9 +1712,8 @@ int storage_do_append_file(ConnectionInfo *pTrackerServer, \
  }

  /*
- format_ip_address(pStorageServer->ip_addr, formatted_ip);
- //printf("upload to storage %s:%u\n", \
- formatted_ip, pStorageServer->port);
+ //printf("upload to storage %s:%d\n", \
+ pStorageServer->ip_addr, pStorageServer->port);
  */

  do
@@ -1828,26 +1729,26 @@ int storage_do_append_file(ConnectionInfo *pTrackerServer, \
  memcpy(p, appender_filename, appender_filename_len);
  p += appender_filename_len;

- long2buff((p - out_buff) + file_size - sizeof(TrackerHeader),
+ long2buff((p - out_buff) + file_size - sizeof(TrackerHeader), \
  pHeader->pkg_len);
  pHeader->cmd = STORAGE_PROTO_CMD_APPEND_FILE;
  pHeader->status = 0;

- if ((result=tcpsenddata_nb(pStorageServer->sock, out_buff,
- p - out_buff, SF_G_NETWORK_TIMEOUT)) != 0)
+ if ((result=tcpsenddata_nb(pStorageServer->sock, out_buff, \
+ p - out_buff, g_fdfs_network_timeout)) != 0)
  {
- format_ip_address(pStorageServer->ip_addr, formatted_ip);
- logError("file: "__FILE__", line: %d, "
- "send data to storage server %s:%u fail, "
- "errno: %d, error info: %s", __LINE__, formatted_ip,
- pStorageServer->port, result, STRERROR(result));
+ logError("file: "__FILE__", line: %d, " \
+ "send data to storage server %s:%d fail, " \
+ "errno: %d, error info: %s", __LINE__, \
+ pStorageServer->ip_addr, pStorageServer->port, \
+ result, STRERROR(result));
  break;
  }

  if (upload_type == FDFS_UPLOAD_BY_FILE)
  {
  if ((result=tcpsendfile(pStorageServer->sock, file_buff, \
- file_size, SF_G_NETWORK_TIMEOUT, \
+ file_size, g_fdfs_network_timeout, \
  &total_send_bytes)) != 0)
  {
  break;

@@ -1855,15 +1756,15 @@ int storage_do_append_file(ConnectionInfo *pTrackerServer, \
  }
  else if (upload_type == FDFS_UPLOAD_BY_BUFF)
  {
- if ((result=tcpsenddata_nb(pStorageServer->sock,
- (char *)file_buff, file_size,
- SF_G_NETWORK_TIMEOUT)) != 0)
+ if ((result=tcpsenddata_nb(pStorageServer->sock, \
+ (char *)file_buff, file_size, \
+ g_fdfs_network_timeout)) != 0)
  {
- format_ip_address(pStorageServer->ip_addr, formatted_ip);
- logError("file: "__FILE__", line: %d, "
- "send data to storage server %s:%u fail, "
- "errno: %d, error info: %s", __LINE__, formatted_ip,
- pStorageServer->port, result, STRERROR(result));
+ logError("file: "__FILE__", line: %d, " \
+ "send data to storage server %s:%d fail, " \
+ "errno: %d, error info: %s", __LINE__, \
+ pStorageServer->ip_addr, pStorageServer->port, \
+ result, STRERROR(result));
  break;
  }
  }
@@ -1887,10 +1788,10 @@ int storage_do_append_file(ConnectionInfo *pTrackerServer, \

  if (in_bytes != 0)
  {
- format_ip_address(pStorageServer->ip_addr, formatted_ip);
- logError("file: "__FILE__", line: %d, "
- "storage server %s:%u response data length: %"PRId64" "
- "is invalid, should == 0", __LINE__, formatted_ip,
+ logError("file: "__FILE__", line: %d, " \
+ "storage server %s:%d response data " \
+ "length: %"PRId64" is invalid, " \
+ "should == 0", __LINE__, pStorageServer->ip_addr, \
  pStorageServer->port, in_bytes);
  result = EINVAL;
  break;
@@ -1923,7 +1824,6 @@ int storage_do_modify_file(ConnectionInfo *pTrackerServer, \
  TrackerHeader *pHeader;
  int result;
  char out_buff[512];
- char formatted_ip[FORMATTED_IP_SIZE];
  char *p;
  int64_t in_bytes;
  int64_t total_send_bytes;

@@ -1940,9 +1840,8 @@ int storage_do_modify_file(ConnectionInfo *pTrackerServer, \
  }

  /*
- format_ip_address(pStorageServer->ip_addr, formatted_ip);
- //printf("upload to storage %s:%u\n", \
- formatted_ip, pStorageServer->port);
+ //printf("upload to storage %s:%d\n", \
+ pStorageServer->ip_addr, pStorageServer->port);
  */

  do
@@ -1961,41 +1860,42 @@ int storage_do_modify_file(ConnectionInfo *pTrackerServer, \
  memcpy(p, appender_filename, appender_filename_len);
  p += appender_filename_len;

- long2buff((p - out_buff) + file_size - sizeof(TrackerHeader),
+ long2buff((p - out_buff) + file_size - sizeof(TrackerHeader), \
  pHeader->pkg_len);
  pHeader->cmd = STORAGE_PROTO_CMD_MODIFY_FILE;
  pHeader->status = 0;

- if ((result=tcpsenddata_nb(pStorageServer->sock, out_buff,
- p - out_buff, SF_G_NETWORK_TIMEOUT)) != 0)
+ if ((result=tcpsenddata_nb(pStorageServer->sock, out_buff, \
+ p - out_buff, g_fdfs_network_timeout)) != 0)
  {
- format_ip_address(pStorageServer->ip_addr, formatted_ip);
- logError("file: "__FILE__", line: %d, "
- "send data to storage server %s:%u fail, "
- "errno: %d, error info: %s", __LINE__, formatted_ip,
- pStorageServer->port, result, STRERROR(result));
+ logError("file: "__FILE__", line: %d, " \
+ "send data to storage server %s:%d fail, " \
+ "errno: %d, error info: %s", __LINE__, \
+ pStorageServer->ip_addr, pStorageServer->port, \
+ result, STRERROR(result));
  break;
  }

  if (upload_type == FDFS_UPLOAD_BY_FILE)
  {
- if ((result=tcpsendfile(pStorageServer->sock, file_buff,
- file_size, SF_G_NETWORK_TIMEOUT, &total_send_bytes)) != 0)
+ if ((result=tcpsendfile(pStorageServer->sock, file_buff, \
+ file_size, g_fdfs_network_timeout, \
+ &total_send_bytes)) != 0)
  {
  break;
  }
  }
  else if (upload_type == FDFS_UPLOAD_BY_BUFF)
  {
- if ((result=tcpsenddata_nb(pStorageServer->sock,
- (char *)file_buff, file_size,
- SF_G_NETWORK_TIMEOUT)) != 0)
+ if ((result=tcpsenddata_nb(pStorageServer->sock, \
+ (char *)file_buff, file_size, \
+ g_fdfs_network_timeout)) != 0)
  {
- format_ip_address(pStorageServer->ip_addr, formatted_ip);
- logError("file: "__FILE__", line: %d, "
- "send data to storage server %s:%u fail, "
- "errno: %d, error info: %s", __LINE__, formatted_ip,
- pStorageServer->port, result, STRERROR(result));
+ logError("file: "__FILE__", line: %d, " \
+ "send data to storage server %s:%d fail, " \
+ "errno: %d, error info: %s", __LINE__, \
+ pStorageServer->ip_addr, pStorageServer->port, \
+ result, STRERROR(result));
  break;
  }
  }
@@ -2019,10 +1919,10 @@ int storage_do_modify_file(ConnectionInfo *pTrackerServer, \

  if (in_bytes != 0)
  {
- format_ip_address(pStorageServer->ip_addr, formatted_ip);
- logError("file: "__FILE__", line: %d, "
- "storage server %s:%u response data length: %"PRId64" "
- "is invalid, should == 0", __LINE__, formatted_ip,
+ logError("file: "__FILE__", line: %d, " \
+ "storage server %s:%d response data " \
+ "length: %"PRId64" is invalid, " \
+ "should == 0", __LINE__, pStorageServer->ip_addr, \
  pStorageServer->port, in_bytes);
  result = EINVAL;
  break;
@@ -2245,10 +2145,10 @@ int fdfs_get_file_info_ex(const char *group_name, const char *remote_filename, \
  char buff[64];

  memset(pFileInfo, 0, sizeof(FDFSFileInfo));
- if (!g_base64_context_inited)
+ if (!the_base64_context_inited)
  {
- g_base64_context_inited = 1;
- base64_init_ex(&g_fdfs_base64_context, 0, '-', '_', '.');
+ the_base64_context_inited = 1;
+ base64_init_ex(&the_base64_context, 0, '-', '_', '.');
  }

  filename_len = strlen(remote_filename);

@@ -2262,7 +2162,7 @@ int fdfs_get_file_info_ex(const char *group_name, const char *remote_filename, \
  }

  memset(buff, 0, sizeof(buff));
- base64_decode_auto(&g_fdfs_base64_context, (char *)remote_filename + \
+ base64_decode_auto(&the_base64_context, (char *)remote_filename + \
  FDFS_LOGIC_FILE_PATH_LEN, FDFS_FILENAME_BASE64_LENGTH, \
  buff, &buff_len);

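The hunks above show that fdfs_get_file_info_ex() recovers file details by base64-decoding a fixed slice of the remote filename, so the basic fields (source address, create timestamp, size, crc32) are carried inside the file ID itself. A typical call looks like the sketch below, which mirrors how the test program later in this compare view uses fdfs_get_file_info() and FDFSFileInfo; it assumes the client library and a valid client config file are installed and trims error handling.

#include <stdio.h>
#include <inttypes.h>
#include "fdfs_client.h"

int main(int argc, char *argv[])
{
    FDFSFileInfo file_info;
    int result;

    if (argc < 4)
    {
        printf("Usage: %s <config_file> <group_name> <remote_filename>\n", argv[0]);
        return 1;
    }

    if ((result=fdfs_client_init(argv[1])) != 0)
    {
        return result;
    }

    /* decodes the fields embedded (base64) in the remote filename */
    if ((result=fdfs_get_file_info(argv[2], argv[3], &file_info)) == 0)
    {
        printf("source ip: %s, size: %"PRId64", crc32: %u\n",
            file_info.source_ip_addr, file_info.file_size, file_info.crc32);
    }

    fdfs_client_destroy();
    return result;
}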
@@ -2392,7 +2292,6 @@ int storage_truncate_file(ConnectionInfo *pTrackerServer, \
  TrackerHeader *pHeader;
  int result;
  char out_buff[512];
- char formatted_ip[FORMATTED_IP_SIZE];
  char *p;
  int64_t in_bytes;
  ConnectionInfo storageServer;

@@ -2408,9 +2307,8 @@ int storage_truncate_file(ConnectionInfo *pTrackerServer, \
  }

  /*
- format_ip_address(pStorageServer->ip_addr, formatted_ip);
- //printf("upload to storage %s:%u\n", \
- formatted_ip, pStorageServer->port);
+ //printf("upload to storage %s:%d\n", \
+ pStorageServer->ip_addr, pStorageServer->port);
  */

  do
@@ -2426,19 +2324,19 @@ int storage_truncate_file(ConnectionInfo *pTrackerServer, \
  memcpy(p, appender_filename, appender_filename_len);
  p += appender_filename_len;

- long2buff((p - out_buff) - sizeof(TrackerHeader),
+ long2buff((p - out_buff) - sizeof(TrackerHeader), \
  pHeader->pkg_len);
  pHeader->cmd = STORAGE_PROTO_CMD_TRUNCATE_FILE;
  pHeader->status = 0;

- if ((result=tcpsenddata_nb(pStorageServer->sock, out_buff,
- p - out_buff, SF_G_NETWORK_TIMEOUT)) != 0)
+ if ((result=tcpsenddata_nb(pStorageServer->sock, out_buff, \
+ p - out_buff, g_fdfs_network_timeout)) != 0)
  {
- format_ip_address(pStorageServer->ip_addr, formatted_ip);
- logError("file: "__FILE__", line: %d, "
- "send data to storage server %s:%u fail, "
- "errno: %d, error info: %s", __LINE__, formatted_ip,
- pStorageServer->port, result, STRERROR(result));
+ logError("file: "__FILE__", line: %d, " \
+ "send data to storage server %s:%d fail, " \
+ "errno: %d, error info: %s", __LINE__, \
+ pStorageServer->ip_addr, pStorageServer->port, \
+ result, STRERROR(result));
  break;
  }

@@ -2452,10 +2350,10 @@ int storage_truncate_file(ConnectionInfo *pTrackerServer, \

  if (in_bytes != 0)
  {
- format_ip_address(pStorageServer->ip_addr, formatted_ip);
- logError("file: "__FILE__", line: %d, "
- "storage server %s:%u response data length: %"PRId64" "
- "is invalid, should == 0", __LINE__, formatted_ip,
+ logError("file: "__FILE__", line: %d, " \
+ "storage server %s:%d response data " \
+ "length: %"PRId64" is invalid, " \
+ "should == 0", __LINE__, pStorageServer->ip_addr, \
  pStorageServer->port, in_bytes);
  result = EINVAL;
  break;
@@ -2479,7 +2377,6 @@ int storage_regenerate_appender_filename(ConnectionInfo *pTrackerServer,
  int result;
  char out_buff[512];
  char in_buff[256];
- char formatted_ip[FORMATTED_IP_SIZE];
  char *p;
  char *pInBuff;
  int64_t in_bytes;

@@ -2509,13 +2406,13 @@ int storage_regenerate_appender_filename(ConnectionInfo *pTrackerServer,
  pHeader->status = 0;

  if ((result=tcpsenddata_nb(pStorageServer->sock, out_buff,
- p - out_buff, SF_G_NETWORK_TIMEOUT)) != 0)
+ p - out_buff, g_fdfs_network_timeout)) != 0)
  {
- format_ip_address(pStorageServer->ip_addr, formatted_ip);
  logError("file: "__FILE__", line: %d, "
- "send data to storage server %s:%u fail, "
- "errno: %d, error info: %s", __LINE__, formatted_ip,
- pStorageServer->port, result, STRERROR(result));
+ "send data to storage server %s:%d fail, "
+ "errno: %d, error info: %s", __LINE__,
+ pStorageServer->ip_addr, pStorageServer->port,
+ result, STRERROR(result));
  break;
  }

@@ -2531,11 +2428,12 @@ int storage_regenerate_appender_filename(ConnectionInfo *pTrackerServer,

  if (in_bytes <= FDFS_GROUP_NAME_MAX_LEN)
  {
- format_ip_address(pStorageServer->ip_addr, formatted_ip);
  logError("file: "__FILE__", line: %d, "
- "storage server %s:%u response data length: %"PRId64" "
- "is invalid, should > %d", __LINE__, formatted_ip,
- pStorageServer->port, in_bytes, FDFS_GROUP_NAME_MAX_LEN);
+ "storage server %s:%d response data "
+ "length: %"PRId64" is invalid, "
+ "should > %d", __LINE__,
+ pStorageServer->ip_addr, pStorageServer->port,
+ in_bytes, FDFS_GROUP_NAME_MAX_LEN);
  result = EINVAL;
  break;
  }
@@ -3,7 +3,7 @@
  COMPILE = $(CC) $(CFLAGS)
  INC_PATH = -I/usr/include/fastcommon -I/usr/include/fastdfs \
  -I/usr/local/include/fastcommon -I/usr/local/include/fastdfs
- LIB_PATH = -L/usr/local/lib -lfastcommon -lserverframe -lfdfsclient $(LIBS)
+ LIB_PATH = -L/usr/local/lib -lfastcommon -lfdfsclient $(LIBS)
  TARGET_PATH = $(TARGET_PATH)

  ALL_OBJS = 
@@ -1 +0,0 @@
- ../fdfs_monitor.c

@@ -0,0 +1,596 @@
|
/**
|
||||||
|
* Copyright (C) 2008 Happy Fish / YuQing
|
||||||
|
*
|
||||||
|
* FastDFS may be copied only under the terms of the GNU General
|
||||||
|
* Public License V3, which may be found in the FastDFS source kit.
|
||||||
|
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
|
||||||
|
**/
|
||||||
|
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <string.h>
|
||||||
|
#include <string.h>
|
||||||
|
#include <errno.h>
|
||||||
|
#include <signal.h>
|
||||||
|
#include <netdb.h>
|
||||||
|
#include <sys/types.h>
|
||||||
|
#include "fastcommon/sockopt.h"
|
||||||
|
#include "fastcommon/logger.h"
|
||||||
|
#include "client_global.h"
|
||||||
|
#include "fdfs_global.h"
|
||||||
|
#include "fdfs_client.h"
|
||||||
|
|
||||||
|
static ConnectionInfo *pTrackerServer;
|
||||||
|
|
||||||
|
static int list_all_groups(const char *group_name);
|
||||||
|
|
||||||
|
static void usage(char *argv[])
|
||||||
|
{
|
||||||
|
printf("Usage: %s <config_file> [-h <tracker_server>] "
|
||||||
|
"[list|delete|set_trunk_server <group_name> [storage_id]]\n"
|
||||||
|
"\tthe tracker server format: host[:port], "
|
||||||
|
"the tracker default port is %d\n\n",
|
||||||
|
argv[0], FDFS_TRACKER_SERVER_DEF_PORT);
|
||||||
|
}
|
||||||
|
|
||||||
|
int main(int argc, char *argv[])
|
||||||
|
{
|
||||||
|
char *conf_filename;
|
||||||
|
int result;
|
||||||
|
char *op_type;
|
||||||
|
char *tracker_server;
|
||||||
|
int arg_index;
|
||||||
|
char *group_name;
|
||||||
|
|
||||||
|
if (argc < 2)
|
||||||
|
{
|
||||||
|
usage(argv);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
tracker_server = NULL;
|
||||||
|
conf_filename = argv[1];
|
||||||
|
arg_index = 2;
|
||||||
|
|
||||||
|
if (arg_index >= argc)
|
||||||
|
{
|
||||||
|
op_type = "list";
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
int len;
|
||||||
|
|
||||||
|
len = strlen(argv[arg_index]);
|
||||||
|
if (len >= 2 && strncmp(argv[arg_index], "-h", 2) == 0)
|
||||||
|
{
|
||||||
|
if (len == 2)
|
||||||
|
{
|
||||||
|
arg_index++;
|
||||||
|
if (arg_index >= argc)
|
||||||
|
{
|
||||||
|
usage(argv);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
tracker_server = argv[arg_index++];
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
tracker_server = argv[arg_index] + 2;
|
||||||
|
arg_index++;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (arg_index < argc)
|
||||||
|
{
|
||||||
|
op_type = argv[arg_index++];
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
op_type = "list";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
op_type = argv[arg_index++];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log_init();
|
||||||
|
g_log_context.log_level = LOG_DEBUG;
|
||||||
|
ignore_signal_pipe();
|
||||||
|
|
||||||
|
if ((result=fdfs_client_init(conf_filename)) != 0)
|
||||||
|
{
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
load_log_level_ex(conf_filename);
|
||||||
|
|
||||||
|
if (tracker_server == NULL)
|
||||||
|
{
|
||||||
|
if (g_tracker_group.server_count > 1)
|
||||||
|
{
|
||||||
|
srand(time(NULL));
|
||||||
|
rand(); //discard the first
|
||||||
|
g_tracker_group.server_index = (int)( \
|
||||||
|
(g_tracker_group.server_count * (double)rand()) \
|
||||||
|
/ (double)RAND_MAX);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
int i;
|
||||||
|
ConnectionInfo conn;
|
||||||
|
|
||||||
|
if ((result=conn_pool_parse_server_info(tracker_server, &conn,
|
||||||
|
FDFS_TRACKER_SERVER_DEF_PORT)) != 0)
|
||||||
|
{
|
||||||
|
printf("resolve ip address of tracker server: %s "
|
||||||
|
"fail!, error info: %s\n", tracker_server, hstrerror(h_errno));
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (i=0; i<g_tracker_group.server_count; i++)
|
||||||
|
{
|
||||||
|
if (fdfs_server_contain1(g_tracker_group.servers + i, &conn))
|
||||||
|
{
|
||||||
|
g_tracker_group.server_index = i;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (i == g_tracker_group.server_count)
|
||||||
|
{
|
||||||
|
printf("tracker server: %s not exists!\n", tracker_server);
|
||||||
|
return 2;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
printf("server_count=%d, server_index=%d\n",
|
||||||
|
g_tracker_group.server_count, g_tracker_group.server_index);
|
||||||
|
|
||||||
|
pTrackerServer = tracker_get_connection();
|
||||||
|
if (pTrackerServer == NULL)
|
||||||
|
{
|
||||||
|
fdfs_client_destroy();
|
||||||
|
return errno != 0 ? errno : ECONNREFUSED;
|
||||||
|
}
|
||||||
|
printf("\ntracker server is %s:%d\n\n", pTrackerServer->ip_addr, pTrackerServer->port);
|
||||||
|
|
||||||
|
if (arg_index < argc)
|
||||||
|
{
|
||||||
|
group_name = argv[arg_index++];
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
group_name = NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (strcmp(op_type, "list") == 0)
|
||||||
|
{
|
||||||
|
if (group_name == NULL)
|
||||||
|
{
|
||||||
|
result = list_all_groups(NULL);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
result = list_all_groups(group_name);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else if (strcmp(op_type, "delete") == 0)
|
||||||
|
{
|
||||||
|
if (arg_index >= argc)
|
||||||
|
{
|
||||||
|
if ((result=tracker_delete_group(&g_tracker_group, \
|
||||||
|
group_name)) == 0)
|
||||||
|
{
|
||||||
|
printf("delete group: %s success\n", \
|
||||||
|
group_name);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
printf("delete group: %s fail, " \
|
||||||
|
"error no: %d, error info: %s\n", \
|
||||||
|
group_name, result, STRERROR(result));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
char *storage_id;
|
||||||
|
|
||||||
|
storage_id = argv[arg_index++];
|
||||||
|
if ((result=tracker_delete_storage(&g_tracker_group, \
|
||||||
|
group_name, storage_id)) == 0)
|
||||||
|
{
|
||||||
|
printf("delete storage server %s::%s success\n", \
|
||||||
|
group_name, storage_id);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
printf("delete storage server %s::%s fail, " \
|
||||||
|
"error no: %d, error info: %s\n", \
|
||||||
|
group_name, storage_id, \
|
||||||
|
result, STRERROR(result));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else if (strcmp(op_type, "set_trunk_server") == 0)
|
||||||
|
{
|
||||||
|
char *storage_id;
|
||||||
|
char new_trunk_server_id[FDFS_STORAGE_ID_MAX_SIZE];
|
||||||
|
|
||||||
|
if (group_name == NULL)
|
||||||
|
{
|
||||||
|
usage(argv);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
if (arg_index >= argc)
|
||||||
|
{
|
||||||
|
storage_id = "";
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
storage_id = argv[arg_index++];
|
||||||
|
}
|
||||||
|
|
||||||
|
if ((result=tracker_set_trunk_server(&g_tracker_group, \
|
||||||
|
group_name, storage_id, new_trunk_server_id)) == 0)
|
||||||
|
{
|
||||||
|
printf("set trunk server %s::%s success, " \
|
||||||
|
"new trunk server: %s\n", group_name, \
|
||||||
|
storage_id, new_trunk_server_id);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
printf("set trunk server %s::%s fail, " \
|
||||||
|
"error no: %d, error info: %s\n", \
|
||||||
|
group_name, storage_id, \
|
||||||
|
result, STRERROR(result));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
printf("Invalid command %s\n\n", op_type);
|
||||||
|
usage(argv);
|
||||||
|
}
|
||||||
|
|
||||||
|
tracker_close_connection_ex(pTrackerServer, true);
|
||||||
|
fdfs_client_destroy();
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int list_storages(FDFSGroupStat *pGroupStat)
|
||||||
|
{
|
||||||
|
int result;
|
||||||
|
int storage_count;
|
||||||
|
FDFSStorageInfo storage_infos[FDFS_MAX_SERVERS_EACH_GROUP];
|
||||||
|
FDFSStorageInfo *p;
|
||||||
|
FDFSStorageInfo *pStorage;
|
||||||
|
FDFSStorageInfo *pStorageEnd;
|
||||||
|
FDFSStorageStat *pStorageStat;
|
||||||
|
char szJoinTime[32];
|
||||||
|
char szUpTime[32];
|
||||||
|
char szLastHeartBeatTime[32];
|
||||||
|
char szSrcUpdTime[32];
|
||||||
|
char szSyncUpdTime[32];
|
||||||
|
char szSyncedTimestamp[32];
|
||||||
|
char szSyncedDelaySeconds[128];
|
||||||
|
char szHostname[128];
|
||||||
|
char szHostnamePrompt[128+8];
|
||||||
|
int k;
|
||||||
|
int max_last_source_update;
|
||||||
|
|
||||||
|
printf( "group name = %s\n" \
|
||||||
|
"disk total space = %"PRId64" MB\n" \
|
||||||
|
"disk free space = %"PRId64" MB\n" \
|
||||||
|
"trunk free space = %"PRId64" MB\n" \
|
||||||
|
"storage server count = %d\n" \
|
||||||
|
"active server count = %d\n" \
|
||||||
|
"storage server port = %d\n" \
|
||||||
|
"storage HTTP port = %d\n" \
|
||||||
|
"store path count = %d\n" \
|
||||||
|
"subdir count per path = %d\n" \
|
||||||
|
"current write server index = %d\n" \
|
||||||
|
"current trunk file id = %d\n\n", \
|
||||||
|
pGroupStat->group_name, \
|
||||||
|
pGroupStat->total_mb, \
|
||||||
|
pGroupStat->free_mb, \
|
||||||
|
pGroupStat->trunk_free_mb, \
|
||||||
|
pGroupStat->count, \
|
||||||
|
pGroupStat->active_count, \
|
||||||
|
pGroupStat->storage_port, \
|
||||||
|
pGroupStat->storage_http_port, \
|
||||||
|
pGroupStat->store_path_count, \
|
||||||
|
pGroupStat->subdir_count_per_path, \
|
||||||
|
pGroupStat->current_write_server, \
|
||||||
|
pGroupStat->current_trunk_file_id
|
||||||
|
);
|
||||||
|
|
||||||
|
result = tracker_list_servers(pTrackerServer, \
|
||||||
|
pGroupStat->group_name, NULL, \
|
||||||
|
storage_infos, FDFS_MAX_SERVERS_EACH_GROUP, \
|
||||||
|
&storage_count);
|
||||||
|
if (result != 0)
|
||||||
|
{
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
k = 0;
|
||||||
|
pStorageEnd = storage_infos + storage_count;
|
||||||
|
for (pStorage=storage_infos; pStorage<pStorageEnd; \
|
||||||
|
pStorage++)
|
||||||
|
{
|
||||||
|
max_last_source_update = 0;
|
||||||
|
for (p=storage_infos; p<pStorageEnd; p++)
|
||||||
|
{
|
||||||
|
if (p != pStorage && p->stat.last_source_update
|
||||||
|
> max_last_source_update)
|
||||||
|
{
|
||||||
|
max_last_source_update = \
|
||||||
|
p->stat.last_source_update;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pStorageStat = &(pStorage->stat);
|
||||||
|
if (max_last_source_update == 0)
|
||||||
|
{
|
||||||
|
*szSyncedDelaySeconds = '\0';
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
if (pStorageStat->last_synced_timestamp == 0)
|
||||||
|
{
|
||||||
|
strcpy(szSyncedDelaySeconds, "(never synced)");
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
int delay_seconds;
|
||||||
|
int remain_seconds;
|
||||||
|
int day;
|
||||||
|
int hour;
|
||||||
|
int minute;
|
||||||
|
int second;
|
||||||
|
char szDelayTime[64];
|
||||||
|
|
||||||
|
delay_seconds = (int)(max_last_source_update -
|
||||||
|
pStorageStat->last_synced_timestamp);
|
||||||
|
if (delay_seconds < 0)
|
||||||
|
{
|
||||||
|
delay_seconds = 0;
|
||||||
|
}
|
||||||
|
day = delay_seconds / (24 * 3600);
|
||||||
|
remain_seconds = delay_seconds % (24 * 3600);
|
||||||
|
hour = remain_seconds / 3600;
|
||||||
|
remain_seconds %= 3600;
|
||||||
|
minute = remain_seconds / 60;
|
||||||
|
second = remain_seconds % 60;
|
||||||
|
|
||||||
|
if (day != 0)
|
||||||
|
{
|
||||||
|
sprintf(szDelayTime, "%d days " \
|
||||||
|
"%02dh:%02dm:%02ds", \
|
||||||
|
day, hour, minute, second);
|
||||||
|
}
|
||||||
|
else if (hour != 0)
|
||||||
|
{
|
||||||
|
sprintf(szDelayTime, "%02dh:%02dm:%02ds", \
|
||||||
|
hour, minute, second);
|
||||||
|
}
|
||||||
|
else if (minute != 0)
|
||||||
|
{
|
||||||
|
sprintf(szDelayTime, "%02dm:%02ds", minute, second);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
sprintf(szDelayTime, "%ds", second);
|
||||||
|
}
|
||||||
|
|
||||||
|
sprintf(szSyncedDelaySeconds, "(%s delay)", szDelayTime);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
getHostnameByIp(pStorage->ip_addr, szHostname, sizeof(szHostname));
|
||||||
|
if (*szHostname != '\0')
|
||||||
|
{
|
||||||
|
sprintf(szHostnamePrompt, " (%s)", szHostname);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
*szHostnamePrompt = '\0';
|
||||||
|
}
|
||||||
|
|
||||||
|
if (pStorage->up_time != 0)
|
||||||
|
{
|
||||||
|
formatDatetime(pStorage->up_time, \
|
||||||
|
"%Y-%m-%d %H:%M:%S", \
|
||||||
|
szUpTime, sizeof(szUpTime));
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
*szUpTime = '\0';
|
||||||
|
}
|
||||||
|
|
||||||
|
printf( "\tStorage %d:\n" \
|
||||||
|
"\t\tid = %s\n" \
|
||||||
|
"\t\tip_addr = %s%s %s\n" \
|
||||||
|
"\t\thttp domain = %s\n" \
|
||||||
|
"\t\tversion = %s\n" \
|
||||||
|
"\t\tjoin time = %s\n" \
|
||||||
|
"\t\tup time = %s\n" \
|
||||||
|
"\t\ttotal storage = %d MB\n" \
|
||||||
|
"\t\tfree storage = %d MB\n" \
|
||||||
|
"\t\tupload priority = %d\n" \
|
||||||
|
"\t\tstore_path_count = %d\n" \
|
||||||
|
"\t\tsubdir_count_per_path = %d\n" \
|
||||||
|
"\t\tstorage_port = %d\n" \
|
||||||
|
"\t\tstorage_http_port = %d\n" \
|
||||||
|
"\t\tcurrent_write_path = %d\n" \
|
||||||
|
"\t\tsource storage id = %s\n" \
|
||||||
|
"\t\tif_trunk_server = %d\n" \
|
||||||
|
"\t\tconnection.alloc_count = %d\n" \
|
||||||
|
"\t\tconnection.current_count = %d\n" \
|
||||||
|
"\t\tconnection.max_count = %d\n" \
|
||||||
|
"\t\ttotal_upload_count = %"PRId64"\n" \
|
||||||
|
"\t\tsuccess_upload_count = %"PRId64"\n" \
|
||||||
|
"\t\ttotal_append_count = %"PRId64"\n" \
|
||||||
|
"\t\tsuccess_append_count = %"PRId64"\n" \
|
||||||
|
"\t\ttotal_modify_count = %"PRId64"\n" \
|
||||||
|
"\t\tsuccess_modify_count = %"PRId64"\n" \
|
||||||
|
"\t\ttotal_truncate_count = %"PRId64"\n" \
|
||||||
|
"\t\tsuccess_truncate_count = %"PRId64"\n" \
|
||||||
|
"\t\ttotal_set_meta_count = %"PRId64"\n" \
|
||||||
|
"\t\tsuccess_set_meta_count = %"PRId64"\n" \
|
||||||
|
"\t\ttotal_delete_count = %"PRId64"\n" \
|
||||||
|
"\t\tsuccess_delete_count = %"PRId64"\n" \
|
||||||
|
"\t\ttotal_download_count = %"PRId64"\n" \
|
||||||
|
"\t\tsuccess_download_count = %"PRId64"\n" \
|
||||||
|
"\t\ttotal_get_meta_count = %"PRId64"\n" \
|
||||||
|
"\t\tsuccess_get_meta_count = %"PRId64"\n" \
|
||||||
|
"\t\ttotal_create_link_count = %"PRId64"\n" \
|
||||||
|
"\t\tsuccess_create_link_count = %"PRId64"\n"\
|
||||||
|
"\t\ttotal_delete_link_count = %"PRId64"\n" \
|
||||||
|
"\t\tsuccess_delete_link_count = %"PRId64"\n" \
|
||||||
|
"\t\ttotal_upload_bytes = %"PRId64"\n" \
|
||||||
|
"\t\tsuccess_upload_bytes = %"PRId64"\n" \
|
||||||
|
"\t\ttotal_append_bytes = %"PRId64"\n" \
|
||||||
|
"\t\tsuccess_append_bytes = %"PRId64"\n" \
|
||||||
|
"\t\ttotal_modify_bytes = %"PRId64"\n" \
|
||||||
|
"\t\tsuccess_modify_bytes = %"PRId64"\n" \
|
||||||
|
"\t\tstotal_download_bytes = %"PRId64"\n" \
|
||||||
|
"\t\tsuccess_download_bytes = %"PRId64"\n" \
|
||||||
|
"\t\ttotal_sync_in_bytes = %"PRId64"\n" \
|
||||||
|
"\t\tsuccess_sync_in_bytes = %"PRId64"\n" \
|
||||||
|
"\t\ttotal_sync_out_bytes = %"PRId64"\n" \
|
||||||
|
"\t\tsuccess_sync_out_bytes = %"PRId64"\n" \
|
||||||
|
"\t\ttotal_file_open_count = %"PRId64"\n" \
|
||||||
|
"\t\tsuccess_file_open_count = %"PRId64"\n" \
|
||||||
|
"\t\ttotal_file_read_count = %"PRId64"\n" \
|
||||||
|
"\t\tsuccess_file_read_count = %"PRId64"\n" \
|
||||||
|
"\t\ttotal_file_write_count = %"PRId64"\n" \
|
||||||
|
"\t\tsuccess_file_write_count = %"PRId64"\n" \
|
||||||
|
"\t\tlast_heart_beat_time = %s\n" \
|
||||||
|
"\t\tlast_source_update = %s\n" \
|
||||||
|
"\t\tlast_sync_update = %s\n" \
|
||||||
|
"\t\tlast_synced_timestamp = %s %s\n", \
|
||||||
|
++k, pStorage->id, pStorage->ip_addr, \
|
||||||
|
szHostnamePrompt, get_storage_status_caption( \
|
||||||
|
pStorage->status), pStorage->domain_name, \
|
||||||
|
pStorage->version, \
|
||||||
|
formatDatetime(pStorage->join_time, \
|
||||||
|
"%Y-%m-%d %H:%M:%S", \
|
||||||
|
szJoinTime, sizeof(szJoinTime)), \
|
||||||
|
szUpTime, pStorage->total_mb, \
|
||||||
|
pStorage->free_mb, \
|
||||||
|
pStorage->upload_priority, \
|
||||||
|
pStorage->store_path_count, \
|
||||||
|
pStorage->subdir_count_per_path, \
|
||||||
|
pStorage->storage_port, \
|
||||||
|
pStorage->storage_http_port, \
|
||||||
|
pStorage->current_write_path, \
|
||||||
|
pStorage->src_id, \
|
||||||
|
pStorage->if_trunk_server, \
|
||||||
|
pStorageStat->connection.alloc_count, \
|
||||||
|
pStorageStat->connection.current_count, \
|
||||||
|
pStorageStat->connection.max_count, \
|
||||||
|
pStorageStat->total_upload_count, \
|
||||||
|
pStorageStat->success_upload_count, \
|
||||||
|
pStorageStat->total_append_count, \
|
||||||
|
pStorageStat->success_append_count, \
|
||||||
|
pStorageStat->total_modify_count, \
|
||||||
|
pStorageStat->success_modify_count, \
|
||||||
|
pStorageStat->total_truncate_count, \
|
||||||
|
pStorageStat->success_truncate_count, \
|
||||||
|
pStorageStat->total_set_meta_count, \
|
||||||
|
pStorageStat->success_set_meta_count, \
|
||||||
|
pStorageStat->total_delete_count, \
|
||||||
|
pStorageStat->success_delete_count, \
|
||||||
|
pStorageStat->total_download_count, \
|
||||||
|
pStorageStat->success_download_count, \
|
||||||
|
pStorageStat->total_get_meta_count, \
|
||||||
|
pStorageStat->success_get_meta_count, \
|
||||||
|
pStorageStat->total_create_link_count, \
|
||||||
|
pStorageStat->success_create_link_count, \
|
||||||
|
pStorageStat->total_delete_link_count, \
|
||||||
|
pStorageStat->success_delete_link_count, \
|
||||||
|
pStorageStat->total_upload_bytes, \
|
||||||
|
pStorageStat->success_upload_bytes, \
|
||||||
|
pStorageStat->total_append_bytes, \
|
||||||
|
pStorageStat->success_append_bytes, \
|
||||||
|
pStorageStat->total_modify_bytes, \
|
||||||
|
pStorageStat->success_modify_bytes, \
|
||||||
|
pStorageStat->total_download_bytes, \
|
||||||
|
pStorageStat->success_download_bytes, \
|
||||||
|
pStorageStat->total_sync_in_bytes, \
|
||||||
|
pStorageStat->success_sync_in_bytes, \
|
||||||
|
pStorageStat->total_sync_out_bytes, \
|
||||||
|
pStorageStat->success_sync_out_bytes, \
|
||||||
|
pStorageStat->total_file_open_count, \
|
||||||
|
pStorageStat->success_file_open_count, \
|
||||||
|
pStorageStat->total_file_read_count, \
|
||||||
|
pStorageStat->success_file_read_count, \
|
||||||
|
pStorageStat->total_file_write_count, \
|
||||||
|
pStorageStat->success_file_write_count, \
|
||||||
|
formatDatetime(pStorageStat->last_heart_beat_time, \
|
||||||
|
"%Y-%m-%d %H:%M:%S", \
|
||||||
|
szLastHeartBeatTime, sizeof(szLastHeartBeatTime)), \
|
||||||
|
formatDatetime(pStorageStat->last_source_update, \
|
||||||
|
"%Y-%m-%d %H:%M:%S", \
|
||||||
|
szSrcUpdTime, sizeof(szSrcUpdTime)), \
|
||||||
|
formatDatetime(pStorageStat->last_sync_update, \
|
||||||
|
"%Y-%m-%d %H:%M:%S", \
|
||||||
|
szSyncUpdTime, sizeof(szSyncUpdTime)), \
|
||||||
|
formatDatetime(pStorageStat->last_synced_timestamp, \
|
||||||
|
"%Y-%m-%d %H:%M:%S", \
|
||||||
|
szSyncedTimestamp, sizeof(szSyncedTimestamp)),\
|
||||||
|
szSyncedDelaySeconds);
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int list_all_groups(const char *group_name)
|
||||||
|
{
|
||||||
|
int result;
|
||||||
|
int group_count;
|
||||||
|
FDFSGroupStat group_stats[FDFS_MAX_GROUPS];
|
||||||
|
FDFSGroupStat *pGroupStat;
|
||||||
|
FDFSGroupStat *pGroupEnd;
|
||||||
|
int i;
|
||||||
|
|
||||||
|
result = tracker_list_groups(pTrackerServer, \
|
||||||
|
group_stats, FDFS_MAX_GROUPS, \
|
||||||
|
&group_count);
|
||||||
|
if (result != 0)
|
||||||
|
{
|
||||||
|
tracker_close_all_connections();
|
||||||
|
fdfs_client_destroy();
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
pGroupEnd = group_stats + group_count;
|
||||||
|
if (group_name == NULL)
|
||||||
|
{
|
||||||
|
printf("group count: %d\n", group_count);
|
||||||
|
i = 0;
|
||||||
|
for (pGroupStat=group_stats; pGroupStat<pGroupEnd; \
|
||||||
|
pGroupStat++)
|
||||||
|
{
|
||||||
|
printf( "\nGroup %d:\n", ++i);
|
||||||
|
list_storages(pGroupStat);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
for (pGroupStat=group_stats; pGroupStat<pGroupEnd; \
|
||||||
|
pGroupStat++)
|
||||||
|
{
|
||||||
|
if (strcmp(pGroupStat->group_name, group_name) == 0)
|
||||||
|
{
|
||||||
|
list_storages(pGroupStat);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
@ -1 +0,0 @@
|
||||||
../fdfs_test.c
|
|
||||||
|
|
@ -0,0 +1,691 @@
|
||||||
|
/**
|
||||||
|
* Copyright (C) 2008 Happy Fish / YuQing
|
||||||
|
*
|
||||||
|
* FastDFS may be copied only under the terms of the GNU General
|
||||||
|
* Public License V3, which may be found in the FastDFS source kit.
|
||||||
|
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
|
||||||
|
**/
|
||||||
|
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <string.h>
|
||||||
|
#include <string.h>
|
||||||
|
#include <errno.h>
|
||||||
|
#include <sys/types.h>
|
||||||
|
#include <sys/stat.h>
|
||||||
|
#include "fdfs_client.h"
|
||||||
|
#include "fdfs_global.h"
|
||||||
|
#include "fastcommon/base64.h"
|
||||||
|
#include "fastcommon/sockopt.h"
|
||||||
|
#include "fastcommon/logger.h"
|
||||||
|
#include "fdfs_http_shared.h"
|
||||||
|
|
||||||
|
int writeToFileCallback(void *arg, const int64_t file_size, const char *data, \
|
||||||
|
const int current_size)
|
||||||
|
{
|
||||||
|
if (arg == NULL)
|
||||||
|
{
|
||||||
|
return EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (fwrite(data, current_size, 1, (FILE *)arg) != 1)
|
||||||
|
{
|
||||||
|
return errno != 0 ? errno : EIO;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int uploadFileCallback(void *arg, const int64_t file_size, int sock)
|
||||||
|
{
|
||||||
|
int64_t total_send_bytes;
|
||||||
|
char *filename;
|
||||||
|
|
||||||
|
if (arg == NULL)
|
||||||
|
{
|
||||||
|
return EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
filename = (char *)arg;
|
||||||
|
return tcpsendfile(sock, filename, file_size, \
|
||||||
|
g_fdfs_network_timeout, &total_send_bytes);
|
||||||
|
}
|
||||||
|
|
||||||
|
int main(int argc, char *argv[])
|
||||||
|
{
|
||||||
|
char *conf_filename;
|
||||||
|
char *local_filename;
|
||||||
|
ConnectionInfo *pTrackerServer;
|
||||||
|
ConnectionInfo *pStorageServer;
|
||||||
|
int result;
|
||||||
|
ConnectionInfo storageServer;
|
||||||
|
char group_name[FDFS_GROUP_NAME_MAX_LEN + 1];
|
||||||
|
char remote_filename[256];
|
||||||
|
char master_filename[256];
|
||||||
|
FDFSMetaData meta_list[32];
|
||||||
|
int meta_count;
|
||||||
|
int i;
|
||||||
|
FDFSMetaData *pMetaList;
|
||||||
|
char token[32 + 1];
|
||||||
|
char file_id[128];
|
||||||
|
char file_url[256];
|
||||||
|
char szDatetime[20];
|
||||||
|
char szPortPart[16];
|
||||||
|
int url_len;
|
||||||
|
time_t ts;
|
||||||
|
char *file_buff;
|
||||||
|
int64_t file_size;
|
||||||
|
char *operation;
|
||||||
|
char *meta_buff;
|
||||||
|
int store_path_index;
|
||||||
|
FDFSFileInfo file_info;
|
||||||
|
|
||||||
|
printf("This is FastDFS client test program v%d.%02d\n" \
|
||||||
|
"\nCopyright (C) 2008, Happy Fish / YuQing\n" \
|
||||||
|
"\nFastDFS may be copied only under the terms of the GNU General\n" \
|
||||||
|
"Public License V3, which may be found in the FastDFS source kit.\n" \
|
||||||
|
"Please visit the FastDFS Home Page http://www.fastken.com/ \n" \
|
||||||
|
"for more detail.\n\n" \
|
||||||
|
, g_fdfs_version.major, g_fdfs_version.minor);
|
||||||
|
|
||||||
|
if (argc < 3)
|
||||||
|
{
|
||||||
|
printf("Usage: %s <config_file> <operation>\n" \
|
||||||
|
"\toperation: upload, download, getmeta, setmeta, " \
|
||||||
|
"delete and query_servers\n", argv[0]);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
log_init();
|
||||||
|
g_log_context.log_level = LOG_DEBUG;
|
||||||
|
|
||||||
|
conf_filename = argv[1];
|
||||||
|
operation = argv[2];
|
||||||
|
if ((result=fdfs_client_init(conf_filename)) != 0)
|
||||||
|
{
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
pTrackerServer = tracker_get_connection();
|
||||||
|
if (pTrackerServer == NULL)
|
||||||
|
{
|
||||||
|
fdfs_client_destroy();
|
||||||
|
return errno != 0 ? errno : ECONNREFUSED;
|
||||||
|
}
|
||||||
|
|
||||||
|
pStorageServer = NULL;
|
||||||
|
*group_name = '\0';
|
||||||
|
local_filename = NULL;
|
||||||
|
if (strcmp(operation, "upload") == 0)
|
||||||
|
{
|
||||||
|
int upload_type;
|
||||||
|
char *prefix_name;
|
||||||
|
const char *file_ext_name;
|
||||||
|
char slave_filename[256];
|
||||||
|
int slave_filename_len;
|
||||||
|
|
||||||
|
if (argc < 4)
|
||||||
|
{
|
||||||
|
printf("Usage: %s <config_file> upload " \
|
||||||
|
"<local_filename> [FILE | BUFF | CALLBACK] \n",\
|
||||||
|
argv[0]);
|
||||||
|
fdfs_client_destroy();
|
||||||
|
return EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
local_filename = argv[3];
|
||||||
|
if (argc == 4)
|
||||||
|
{
|
||||||
|
upload_type = FDFS_UPLOAD_BY_FILE;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
if (strcmp(argv[4], "BUFF") == 0)
|
||||||
|
{
|
||||||
|
upload_type = FDFS_UPLOAD_BY_BUFF;
|
||||||
|
}
|
||||||
|
else if (strcmp(argv[4], "CALLBACK") == 0)
|
||||||
|
{
|
||||||
|
upload_type = FDFS_UPLOAD_BY_CALLBACK;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
upload_type = FDFS_UPLOAD_BY_FILE;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
store_path_index = 0;
|
||||||
|
|
||||||
|
{
|
||||||
|
ConnectionInfo storageServers[FDFS_MAX_SERVERS_EACH_GROUP];
|
||||||
|
ConnectionInfo *pServer;
|
||||||
|
ConnectionInfo *pServerEnd;
|
||||||
|
int storage_count;
|
||||||
|
|
||||||
|
if ((result=tracker_query_storage_store_list_without_group( \
|
||||||
|
pTrackerServer, storageServers, \
|
||||||
|
FDFS_MAX_SERVERS_EACH_GROUP, &storage_count, \
|
||||||
|
group_name, &store_path_index)) == 0)
|
||||||
|
{
|
||||||
|
printf("tracker_query_storage_store_list_without_group: \n");
|
||||||
|
pServerEnd = storageServers + storage_count;
|
||||||
|
for (pServer=storageServers; pServer<pServerEnd; pServer++)
|
||||||
|
{
|
||||||
|
printf("\tserver %d. group_name=%s, " \
|
||||||
|
"ip_addr=%s, port=%d\n", \
|
||||||
|
(int)(pServer - storageServers) + 1, \
|
||||||
|
group_name, pServer->ip_addr, pServer->port);
|
||||||
|
}
|
||||||
|
printf("\n");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if ((result=tracker_query_storage_store(pTrackerServer, \
|
||||||
|
&storageServer, group_name, &store_path_index)) != 0)
|
||||||
|
{
|
||||||
|
fdfs_client_destroy();
|
||||||
|
printf("tracker_query_storage fail, " \
|
||||||
|
"error no: %d, error info: %s\n", \
|
||||||
|
result, STRERROR(result));
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
printf("group_name=%s, ip_addr=%s, port=%d\n", \
|
||||||
|
group_name, storageServer.ip_addr, \
|
||||||
|
storageServer.port);
|
||||||
|
|
||||||
|
if ((pStorageServer=tracker_make_connection(&storageServer, \
|
||||||
|
&result)) == NULL)
|
||||||
|
{
|
||||||
|
fdfs_client_destroy();
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
memset(&meta_list, 0, sizeof(meta_list));
|
||||||
|
meta_count = 0;
|
||||||
|
strcpy(meta_list[meta_count].name, "ext_name");
|
||||||
|
strcpy(meta_list[meta_count].value, "jpg");
|
||||||
|
meta_count++;
|
||||||
|
strcpy(meta_list[meta_count].name, "width");
|
||||||
|
strcpy(meta_list[meta_count].value, "160");
|
||||||
|
meta_count++;
|
||||||
|
strcpy(meta_list[meta_count].name, "height");
|
||||||
|
strcpy(meta_list[meta_count].value, "80");
|
||||||
|
meta_count++;
|
||||||
|
strcpy(meta_list[meta_count].name, "file_size");
|
||||||
|
strcpy(meta_list[meta_count].value, "115120");
|
||||||
|
meta_count++;
|
||||||
|
|
||||||
|
file_ext_name = fdfs_get_file_ext_name(local_filename);
|
||||||
|
*group_name = '\0';
|
||||||
|
|
||||||
|
if (upload_type == FDFS_UPLOAD_BY_FILE)
|
||||||
|
{
|
||||||
|
result = storage_upload_by_filename(pTrackerServer, \
|
||||||
|
pStorageServer, store_path_index, \
|
||||||
|
local_filename, file_ext_name, \
|
||||||
|
meta_list, meta_count, \
|
||||||
|
group_name, remote_filename);
|
||||||
|
|
||||||
|
printf("storage_upload_by_filename\n");
|
||||||
|
}
|
||||||
|
else if (upload_type == FDFS_UPLOAD_BY_BUFF)
|
||||||
|
{
|
||||||
|
char *file_content;
|
||||||
|
if ((result=getFileContent(local_filename, \
|
||||||
|
&file_content, &file_size)) == 0)
|
||||||
|
{
|
||||||
|
result = storage_upload_by_filebuff(pTrackerServer, \
|
||||||
|
pStorageServer, store_path_index, \
|
||||||
|
file_content, file_size, file_ext_name, \
|
||||||
|
meta_list, meta_count, \
|
||||||
|
group_name, remote_filename);
|
||||||
|
free(file_content);
|
||||||
|
}
|
||||||
|
|
||||||
|
printf("storage_upload_by_filebuff\n");
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
struct stat stat_buf;
|
||||||
|
|
||||||
|
if (stat(local_filename, &stat_buf) == 0 && \
|
||||||
|
S_ISREG(stat_buf.st_mode))
|
||||||
|
{
|
||||||
|
file_size = stat_buf.st_size;
|
||||||
|
result = storage_upload_by_callback(pTrackerServer, \
|
||||||
|
pStorageServer, store_path_index, \
|
||||||
|
uploadFileCallback, local_filename, \
|
||||||
|
file_size, file_ext_name, \
|
||||||
|
meta_list, meta_count, \
|
||||||
|
group_name, remote_filename);
|
||||||
|
}
|
||||||
|
|
||||||
|
printf("storage_upload_by_callback\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
if (result != 0)
|
||||||
|
{
|
||||||
|
printf("upload file fail, " \
|
||||||
|
"error no: %d, error info: %s\n", \
|
||||||
|
result, STRERROR(result));
|
||||||
|
tracker_close_connection_ex(pStorageServer, true);
|
||||||
|
fdfs_client_destroy();
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (g_tracker_server_http_port == 80)
|
||||||
|
{
|
||||||
|
*szPortPart = '\0';
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
sprintf(szPortPart, ":%d", g_tracker_server_http_port);
|
||||||
|
}
|
||||||
|
|
||||||
|
sprintf(file_id, "%s/%s", group_name, remote_filename);
|
||||||
|
url_len = sprintf(file_url, "http://%s%s/%s", \
|
||||||
|
pStorageServer->ip_addr, szPortPart, file_id);
|
||||||
|
if (g_anti_steal_token)
|
||||||
|
{
|
||||||
|
ts = time(NULL);
|
||||||
|
fdfs_http_gen_token(&g_anti_steal_secret_key, file_id, \
|
||||||
|
ts, token);
|
||||||
|
sprintf(file_url + url_len, "?token=%s&ts=%d", \
|
||||||
|
token, (int)ts);
|
||||||
|
}
|
||||||
|
|
||||||
|
printf("group_name=%s, remote_filename=%s\n", \
|
||||||
|
group_name, remote_filename);
|
||||||
|
|
||||||
|
fdfs_get_file_info(group_name, remote_filename, &file_info);
|
||||||
|
printf("source ip address: %s\n", file_info.source_ip_addr);
|
||||||
|
printf("file timestamp=%s\n", formatDatetime(
|
||||||
|
file_info.create_timestamp, "%Y-%m-%d %H:%M:%S", \
|
||||||
|
szDatetime, sizeof(szDatetime)));
|
||||||
|
printf("file size=%"PRId64"\n", file_info.file_size);
|
||||||
|
printf("file crc32=%u\n", file_info.crc32);
|
||||||
|
printf("example file url: %s\n", file_url);
|
||||||
|
|
||||||
|
strcpy(master_filename, remote_filename);
|
||||||
|
*remote_filename = '\0';
|
||||||
|
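        /*
         * Slave upload test: the file just uploaded becomes the master file,
         * and a slave file is uploaded against it using the same upload mode.
         * Its name on the storage server is derived from the master filename
         * plus prefix_name and the file extension, which is cross-checked
         * further down with fdfs_gen_slave_filename().
         */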
        if (upload_type == FDFS_UPLOAD_BY_FILE)
        {
            prefix_name = "_big";
            result = storage_upload_slave_by_filename(pTrackerServer,
                NULL, local_filename, master_filename, \
                prefix_name, file_ext_name, \
                meta_list, meta_count, \
                group_name, remote_filename);

            printf("storage_upload_slave_by_filename\n");
        }
        else if (upload_type == FDFS_UPLOAD_BY_BUFF)
        {
            char *file_content;
            prefix_name = "1024x1024";
            if ((result=getFileContent(local_filename, \
                &file_content, &file_size)) == 0)
            {
                result = storage_upload_slave_by_filebuff(pTrackerServer, \
                    NULL, file_content, file_size, master_filename,
                    prefix_name, file_ext_name, \
                    meta_list, meta_count, \
                    group_name, remote_filename);
                free(file_content);
            }

            printf("storage_upload_slave_by_filebuff\n");
        }
        else
        {
            struct stat stat_buf;

            prefix_name = "-small";
            if (stat(local_filename, &stat_buf) == 0 && \
                S_ISREG(stat_buf.st_mode))
            {
                file_size = stat_buf.st_size;
                result = storage_upload_slave_by_callback(pTrackerServer, \
                    NULL, uploadFileCallback, local_filename, \
                    file_size, master_filename, prefix_name, \
                    file_ext_name, meta_list, meta_count, \
                    group_name, remote_filename);
            }

            printf("storage_upload_slave_by_callback\n");
        }

        if (result != 0)
        {
            printf("upload slave file fail, " \
                "error no: %d, error info: %s\n", \
                result, STRERROR(result));
            tracker_close_connection_ex(pStorageServer, true);
            fdfs_client_destroy();
            return result;
        }

        if (g_tracker_server_http_port == 80)
        {
            *szPortPart = '\0';
        }
        else
        {
            sprintf(szPortPart, ":%d", g_tracker_server_http_port);
        }

        sprintf(file_id, "%s/%s", group_name, remote_filename);
        url_len = sprintf(file_url, "http://%s%s/%s", \
            pStorageServer->ip_addr, szPortPart, file_id);
        if (g_anti_steal_token)
        {
            ts = time(NULL);
            fdfs_http_gen_token(&g_anti_steal_secret_key, file_id, \
                ts, token);
            sprintf(file_url + url_len, "?token=%s&ts=%d", \
                token, (int)ts);
        }

        printf("group_name=%s, remote_filename=%s\n", \
            group_name, remote_filename);

        fdfs_get_file_info(group_name, remote_filename, &file_info);

        printf("source ip address: %s\n", file_info.source_ip_addr);
        printf("file timestamp=%s\n", formatDatetime(
            file_info.create_timestamp, "%Y-%m-%d %H:%M:%S", \
            szDatetime, sizeof(szDatetime)));
        printf("file size=%"PRId64"\n", file_info.file_size);
        printf("file crc32=%u\n", file_info.crc32);
        printf("example file url: %s\n", file_url);

        if (fdfs_gen_slave_filename(master_filename, \
            prefix_name, file_ext_name, \
            slave_filename, &slave_filename_len) == 0)
        {

            if (strcmp(remote_filename, slave_filename) != 0)
            {
                printf("slave_filename=%s\n" \
                    "remote_filename=%s\n" \
                    "not equal!\n", \
                    slave_filename, remote_filename);
            }
        }
    }
else if (strcmp(operation, "download") == 0 ||
|
||||||
|
strcmp(operation, "getmeta") == 0 ||
|
||||||
|
strcmp(operation, "setmeta") == 0 ||
|
||||||
|
strcmp(operation, "query_servers") == 0 ||
|
||||||
|
strcmp(operation, "delete") == 0)
|
||||||
|
{
|
||||||
|
if (argc < 5)
|
||||||
|
{
|
||||||
|
printf("Usage: %s <config_file> %s " \
|
||||||
|
"<group_name> <remote_filename>\n", \
|
||||||
|
argv[0], operation);
|
||||||
|
fdfs_client_destroy();
|
||||||
|
return EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
snprintf(group_name, sizeof(group_name), "%s", argv[3]);
|
||||||
|
snprintf(remote_filename, sizeof(remote_filename), \
|
||||||
|
"%s", argv[4]);
|
||||||
|
if (strcmp(operation, "setmeta") == 0 ||
|
||||||
|
strcmp(operation, "delete") == 0)
|
||||||
|
{
|
||||||
|
result = tracker_query_storage_update(pTrackerServer, \
|
||||||
|
&storageServer, group_name, remote_filename);
|
||||||
|
}
|
||||||
|
else if (strcmp(operation, "query_servers") == 0)
|
||||||
|
{
|
||||||
|
ConnectionInfo storageServers[FDFS_MAX_SERVERS_EACH_GROUP];
|
||||||
|
int server_count;
|
||||||
|
|
||||||
|
result = tracker_query_storage_list(pTrackerServer, \
|
||||||
|
storageServers, FDFS_MAX_SERVERS_EACH_GROUP, \
|
||||||
|
&server_count, group_name, remote_filename);
|
||||||
|
|
||||||
|
if (result != 0)
|
||||||
|
{
|
||||||
|
printf("tracker_query_storage_list fail, "\
|
||||||
|
"group_name=%s, filename=%s, " \
|
||||||
|
"error no: %d, error info: %s\n", \
|
||||||
|
group_name, remote_filename, \
|
||||||
|
result, STRERROR(result));
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
printf("server list (%d):\n", server_count);
|
||||||
|
for (i=0; i<server_count; i++)
|
||||||
|
{
|
||||||
|
printf("\t%s:%d\n", \
|
||||||
|
storageServers[i].ip_addr, \
|
||||||
|
storageServers[i].port);
|
||||||
|
}
|
||||||
|
printf("\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
tracker_close_connection_ex(pTrackerServer, result != 0);
|
||||||
|
fdfs_client_destroy();
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
result = tracker_query_storage_fetch(pTrackerServer, \
|
||||||
|
&storageServer, group_name, remote_filename);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (result != 0)
|
||||||
|
{
|
||||||
|
fdfs_client_destroy();
|
||||||
|
printf("tracker_query_storage_fetch fail, " \
|
||||||
|
"group_name=%s, filename=%s, " \
|
||||||
|
"error no: %d, error info: %s\n", \
|
||||||
|
group_name, remote_filename, \
|
||||||
|
result, STRERROR(result));
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
printf("storage=%s:%d\n", storageServer.ip_addr, \
|
||||||
|
storageServer.port);
|
||||||
|
|
||||||
|
if ((pStorageServer=tracker_make_connection(&storageServer, \
|
||||||
|
&result)) == NULL)
|
||||||
|
{
|
||||||
|
fdfs_client_destroy();
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
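        /*
         * Three download paths follow: through writeToFileCallback() into an
         * already opened FILE*, directly into a local file with
         * storage_download_file_to_file(), or into an allocated buffer with
         * storage_download_file_to_buff() which is then written out by
         * writeToFile().
         */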
if (strcmp(operation, "download") == 0)
|
||||||
|
{
|
||||||
|
if (argc >= 6)
|
||||||
|
{
|
||||||
|
local_filename = argv[5];
|
||||||
|
if (strcmp(local_filename, "CALLBACK") == 0)
|
||||||
|
{
|
||||||
|
FILE *fp;
|
||||||
|
fp = fopen(local_filename, "wb");
|
||||||
|
if (fp == NULL)
|
||||||
|
{
|
||||||
|
result = errno != 0 ? errno : EPERM;
|
||||||
|
printf("open file \"%s\" fail, " \
|
||||||
|
"errno: %d, error info: %s", \
|
||||||
|
local_filename, result, \
|
||||||
|
STRERROR(result));
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
result = storage_download_file_ex( \
|
||||||
|
pTrackerServer, pStorageServer, \
|
||||||
|
group_name, remote_filename, 0, 0, \
|
||||||
|
writeToFileCallback, fp, &file_size);
|
||||||
|
fclose(fp);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
result = storage_download_file_to_file( \
|
||||||
|
pTrackerServer, pStorageServer, \
|
||||||
|
group_name, remote_filename, \
|
||||||
|
local_filename, &file_size);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
file_buff = NULL;
|
||||||
|
if ((result=storage_download_file_to_buff( \
|
||||||
|
pTrackerServer, pStorageServer, \
|
||||||
|
group_name, remote_filename, \
|
||||||
|
&file_buff, &file_size)) == 0)
|
||||||
|
{
|
||||||
|
local_filename = strrchr( \
|
||||||
|
remote_filename, '/');
|
||||||
|
if (local_filename != NULL)
|
||||||
|
{
|
||||||
|
local_filename++; //skip /
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
local_filename=remote_filename;
|
||||||
|
}
|
||||||
|
|
||||||
|
result = writeToFile(local_filename, \
|
||||||
|
file_buff, file_size);
|
||||||
|
|
||||||
|
free(file_buff);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (result == 0)
|
||||||
|
{
|
||||||
|
printf("download file success, " \
|
||||||
|
"file size=%"PRId64", file save to %s\n", \
|
||||||
|
file_size, local_filename);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
printf("download file fail, " \
|
||||||
|
"error no: %d, error info: %s\n", \
|
||||||
|
result, STRERROR(result));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else if (strcmp(operation, "getmeta") == 0)
|
||||||
|
{
|
||||||
|
if ((result=storage_get_metadata(pTrackerServer, \
|
||||||
|
pStorageServer, group_name, remote_filename, \
|
||||||
|
&pMetaList, &meta_count)) == 0)
|
||||||
|
{
|
||||||
|
printf("get meta data success, " \
|
||||||
|
"meta count=%d\n", meta_count);
|
||||||
|
for (i=0; i<meta_count; i++)
|
||||||
|
{
|
||||||
|
printf("%s=%s\n", \
|
||||||
|
pMetaList[i].name, \
|
||||||
|
pMetaList[i].value);
|
||||||
|
}
|
||||||
|
|
||||||
|
free(pMetaList);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
printf("getmeta fail, " \
|
||||||
|
"error no: %d, error info: %s\n", \
|
||||||
|
result, STRERROR(result));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else if (strcmp(operation, "setmeta") == 0)
|
||||||
|
{
|
||||||
|
if (argc < 7)
|
||||||
|
{
|
||||||
|
printf("Usage: %s <config_file> %s " \
|
||||||
|
"<group_name> <remote_filename> " \
|
||||||
|
"<op_flag> <metadata_list>\n" \
|
||||||
|
"\top_flag: %c for overwrite, " \
|
||||||
|
"%c for merge\n" \
|
||||||
|
"\tmetadata_list: name1=value1," \
|
||||||
|
"name2=value2,...\n", \
|
||||||
|
argv[0], operation, \
|
||||||
|
STORAGE_SET_METADATA_FLAG_OVERWRITE, \
|
||||||
|
STORAGE_SET_METADATA_FLAG_MERGE);
|
||||||
|
fdfs_client_destroy();
|
||||||
|
return EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
meta_buff = strdup(argv[6]);
|
||||||
|
if (meta_buff == NULL)
|
||||||
|
{
|
||||||
|
printf("Out of memory!\n");
|
||||||
|
return ENOMEM;
|
||||||
|
}
|
||||||
|
|
||||||
|
pMetaList = fdfs_split_metadata_ex(meta_buff, \
|
||||||
|
',', '=', &meta_count, &result);
|
||||||
|
if (pMetaList == NULL)
|
||||||
|
{
|
||||||
|
printf("Out of memory!\n");
|
||||||
|
free(meta_buff);
|
||||||
|
return ENOMEM;
|
||||||
|
}
|
||||||
|
|
||||||
|
if ((result=storage_set_metadata(pTrackerServer, \
|
||||||
|
NULL, group_name, remote_filename, \
|
||||||
|
pMetaList, meta_count, *argv[5])) == 0)
|
||||||
|
{
|
||||||
|
printf("set meta data success\n");
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
printf("setmeta fail, " \
|
||||||
|
"error no: %d, error info: %s\n", \
|
||||||
|
result, STRERROR(result));
|
||||||
|
}
|
||||||
|
|
||||||
|
free(meta_buff);
|
||||||
|
free(pMetaList);
|
||||||
|
}
|
||||||
|
else if(strcmp(operation, "delete") == 0)
|
||||||
|
{
|
||||||
|
if ((result=storage_delete_file(pTrackerServer, \
|
||||||
|
NULL, group_name, remote_filename)) == 0)
|
||||||
|
{
|
||||||
|
printf("delete file success\n");
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
printf("delete file fail, " \
|
||||||
|
"error no: %d, error info: %s\n", \
|
||||||
|
result, STRERROR(result));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
fdfs_client_destroy();
|
||||||
|
printf("invalid operation: %s\n", operation);
|
||||||
|
return EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* for test only */
|
||||||
|
if ((result=fdfs_active_test(pTrackerServer)) != 0)
|
||||||
|
{
|
||||||
|
printf("active_test to tracker server %s:%d fail, errno: %d\n", \
|
||||||
|
pTrackerServer->ip_addr, pTrackerServer->port, result);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* for test only */
|
||||||
|
if ((result=fdfs_active_test(pStorageServer)) != 0)
|
||||||
|
{
|
||||||
|
printf("active_test to storage server %s:%d fail, errno: %d\n", \
|
||||||
|
pStorageServer->ip_addr, pStorageServer->port, result);
|
||||||
|
}
|
||||||
|
|
||||||
|
tracker_close_connection_ex(pStorageServer, true);
|
||||||
|
tracker_close_connection_ex(pTrackerServer, true);
|
||||||
|
|
||||||
|
fdfs_client_destroy();
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
@@ -1 +0,0 @@
../fdfs_test1.c

@@ -0,0 +1,658 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "fdfs_client.h"
#include "fdfs_global.h"
#include "fastcommon/base64.h"
#include "fdfs_http_shared.h"
#include "fastcommon/sockopt.h"
#include "fastcommon/logger.h"

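/*
 * Callbacks used by the test below: writeToFileCallback() receives each
 * downloaded chunk and appends it to the FILE * passed in arg;
 * uploadFileCallback() streams the local file named by arg straight to the
 * storage socket via tcpsendfile().
 */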
int writeToFileCallback(void *arg, const int64_t file_size, const char *data, \
        const int current_size)
{
    if (arg == NULL)
    {
        return EINVAL;
    }

    if (fwrite(data, current_size, 1, (FILE *)arg) != 1)
    {
        return errno != 0 ? errno : EIO;
    }

    return 0;
}

int uploadFileCallback(void *arg, const int64_t file_size, int sock)
{
    int64_t total_send_bytes;
    char *filename;
    if (arg == NULL)
    {
        return EINVAL;
    }

    filename = (char *)arg;
    return tcpsendfile(sock, filename, file_size, \
        g_fdfs_network_timeout, &total_send_bytes);
}

int main(int argc, char *argv[])
{
    char *conf_filename;
    char *local_filename;
    ConnectionInfo *pTrackerServer;
    ConnectionInfo *pStorageServer;
    int result;
    ConnectionInfo storageServer;
    char group_name[FDFS_GROUP_NAME_MAX_LEN + 1];
    FDFSMetaData meta_list[32];
    int meta_count;
    int i;
    FDFSMetaData *pMetaList;
    char token[32 + 1];
    char file_id[128];
    char master_file_id[128];
    char file_url[256];
    char szDatetime[20];
    char szPortPart[16];
    int url_len;
    time_t ts;
    char *file_buff;
    int64_t file_size;
    char *operation;
    char *meta_buff;
    int store_path_index;
    FDFSFileInfo file_info;

    printf("This is FastDFS client test program v%d.%02d\n" \
        "\nCopyright (C) 2008, Happy Fish / YuQing\n" \
        "\nFastDFS may be copied only under the terms of the GNU General\n" \
        "Public License V3, which may be found in the FastDFS source kit.\n" \
        "Please visit the FastDFS Home Page http://www.fastken.com/ \n" \
        "for more detail.\n\n" \
        , g_fdfs_version.major, g_fdfs_version.minor);

    if (argc < 3)
    {
        printf("Usage: %s <config_file> <operation>\n" \
            "\toperation: upload, download, getmeta, setmeta, " \
            "delete and query_servers\n", argv[0]);
        return 1;
    }

    log_init();
    g_log_context.log_level = LOG_DEBUG;

    conf_filename = argv[1];
    operation = argv[2];
    if ((result=fdfs_client_init(conf_filename)) != 0)
    {
        return result;
    }

    pTrackerServer = tracker_get_connection();
    if (pTrackerServer == NULL)
    {
        fdfs_client_destroy();
        return errno != 0 ? errno : ECONNREFUSED;
    }

    local_filename = NULL;
    if (strcmp(operation, "upload") == 0)
    {
        int upload_type;
        char *prefix_name;
        const char *file_ext_name;
        char slave_file_id[256];
        int slave_file_id_len;

        if (argc < 4)
        {
            printf("Usage: %s <config_file> upload " \
                "<local_filename> [FILE | BUFF | CALLBACK] \n",\
                argv[0]);
            fdfs_client_destroy();
            return EINVAL;
        }

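        /*
         * Typical invocations (the config path is only illustrative and the
         * file_id is whatever a previous upload printed):
         *   fdfs_test1 /etc/fdfs/client.conf upload /tmp/photo.jpg
         *   fdfs_test1 /etc/fdfs/client.conf upload /tmp/photo.jpg BUFF
         *   fdfs_test1 /etc/fdfs/client.conf download <file_id>
         *   fdfs_test1 /etc/fdfs/client.conf delete <file_id>
         */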
        local_filename = argv[3];
        if (argc == 4)
        {
            upload_type = FDFS_UPLOAD_BY_FILE;
        }
        else
        {
            if (strcmp(argv[4], "BUFF") == 0)
            {
                upload_type = FDFS_UPLOAD_BY_BUFF;
            }
            else if (strcmp(argv[4], "CALLBACK") == 0)
            {
                upload_type = FDFS_UPLOAD_BY_CALLBACK;
            }
            else
            {
                upload_type = FDFS_UPLOAD_BY_FILE;
            }
        }

        {
            ConnectionInfo storageServers[FDFS_MAX_SERVERS_EACH_GROUP];
            ConnectionInfo *pServer;
            ConnectionInfo *pServerEnd;
            int storage_count;

            strcpy(group_name, "group1");
            if ((result=tracker_query_storage_store_list_with_group( \
                pTrackerServer, group_name, storageServers, \
                FDFS_MAX_SERVERS_EACH_GROUP, &storage_count, \
                &store_path_index)) == 0)
            {
                printf("tracker_query_storage_store_list_with_group: \n");
                pServerEnd = storageServers + storage_count;
                for (pServer=storageServers; pServer<pServerEnd; pServer++)
                {
                    printf("\tserver %d. group_name=%s, " \
                        "ip_addr=%s, port=%d\n", \
                        (int)(pServer - storageServers) + 1, \
                        group_name, pServer->ip_addr, \
                        pServer->port);
                }
                printf("\n");
            }
        }

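        /*
         * Clearing group_name lets the tracker pick the group itself;
         * tracker_query_storage_store() fills in the storage server to
         * upload to and the store_path_index that is passed to the upload
         * calls below.
         */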
        *group_name = '\0';
        if ((result=tracker_query_storage_store(pTrackerServer, \
            &storageServer, group_name, &store_path_index)) != 0)
        {
            fdfs_client_destroy();
            printf("tracker_query_storage fail, " \
                "error no: %d, error info: %s\n", \
                result, STRERROR(result));
            return result;
        }

        printf("group_name=%s, ip_addr=%s, port=%d\n", \
            group_name, storageServer.ip_addr, \
            storageServer.port);

        if ((pStorageServer=tracker_make_connection(&storageServer, \
            &result)) == NULL)
        {
            fdfs_client_destroy();
            return result;
        }

        memset(&meta_list, 0, sizeof(meta_list));
        meta_count = 0;
        strcpy(meta_list[meta_count].name, "ext_name");
        strcpy(meta_list[meta_count].value, "jpg");
        meta_count++;
        strcpy(meta_list[meta_count].name, "width");
        strcpy(meta_list[meta_count].value, "160");
        meta_count++;
        strcpy(meta_list[meta_count].name, "height");
        strcpy(meta_list[meta_count].value, "80");
        meta_count++;
        strcpy(meta_list[meta_count].name, "file_size");
        strcpy(meta_list[meta_count].value, "115120");
        meta_count++;

        file_ext_name = fdfs_get_file_ext_name(local_filename);
        strcpy(group_name, "");

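        /*
         * Unlike fdfs_test.c, this test uses the "1" variants of the client
         * API, which identify a file by a single file_id string rather than
         * a separate group_name / remote_filename pair.
         */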
        if (upload_type == FDFS_UPLOAD_BY_FILE)
        {
            printf("storage_upload_by_filename\n");
            result = storage_upload_by_filename1(pTrackerServer, \
                pStorageServer, store_path_index, \
                local_filename, file_ext_name, \
                meta_list, meta_count, \
                group_name, file_id);
        }
        else if (upload_type == FDFS_UPLOAD_BY_BUFF)
        {
            char *file_content;
            printf("storage_upload_by_filebuff\n");
            if ((result=getFileContent(local_filename, \
                &file_content, &file_size)) == 0)
            {
                result = storage_upload_by_filebuff1(pTrackerServer, \
                    pStorageServer, store_path_index, \
                    file_content, file_size, file_ext_name, \
                    meta_list, meta_count, \
                    group_name, file_id);
                free(file_content);
            }
        }
        else
        {
            struct stat stat_buf;

            printf("storage_upload_by_callback\n");
            if (stat(local_filename, &stat_buf) == 0 && \
                S_ISREG(stat_buf.st_mode))
            {
                file_size = stat_buf.st_size;
                result = storage_upload_by_callback1(pTrackerServer, \
                    pStorageServer, store_path_index, \
                    uploadFileCallback, local_filename, \
                    file_size, file_ext_name, \
                    meta_list, meta_count, \
                    group_name, file_id);
            }
        }

        if (result != 0)
        {
            printf("upload file fail, " \
                "error no: %d, error info: %s\n", \
                result, STRERROR(result));
            tracker_close_connection_ex(pStorageServer, true);
            fdfs_client_destroy();
            return result;
        }

        if (g_tracker_server_http_port == 80)
        {
            *szPortPart = '\0';
        }
        else
        {
            sprintf(szPortPart, ":%d", g_tracker_server_http_port);
        }

        url_len = sprintf(file_url, "http://%s%s/%s", \
            pStorageServer->ip_addr, szPortPart, file_id);
        if (g_anti_steal_token)
        {
            ts = time(NULL);
            fdfs_http_gen_token(&g_anti_steal_secret_key, \
                file_id, ts, token);
            sprintf(file_url + url_len, "?token=%s&ts=%d", \
                token, (int)ts);
        }

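        /*
         * fdfs_get_file_info1() fills file_info for the file_id returned by
         * the upload; the source storage IP, create timestamp, size and
         * crc32 printed below all come from that structure.
         */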
        fdfs_get_file_info1(file_id, &file_info);
        printf("source ip address: %s\n", file_info.source_ip_addr);
        printf("file timestamp=%s\n", formatDatetime(
            file_info.create_timestamp, "%Y-%m-%d %H:%M:%S", \
            szDatetime, sizeof(szDatetime)));
        printf("file size=%"PRId64"\n", file_info.file_size);
        printf("file crc32=%u\n", file_info.crc32);
        printf("example file url: %s\n", file_url);

        strcpy(master_file_id, file_id);
        *file_id = '\0';

        if (upload_type == FDFS_UPLOAD_BY_FILE)
        {
            prefix_name = "_big";
            printf("storage_upload_slave_by_filename\n");
            result = storage_upload_slave_by_filename1( \
                pTrackerServer, NULL, \
                local_filename, master_file_id, \
                prefix_name, file_ext_name, \
                meta_list, meta_count, file_id);
        }
        else if (upload_type == FDFS_UPLOAD_BY_BUFF)
        {
            char *file_content;
            prefix_name = "1024x1024";
            printf("storage_upload_slave_by_filebuff\n");
            if ((result=getFileContent(local_filename, \
                &file_content, &file_size)) == 0)
            {
                result = storage_upload_slave_by_filebuff1( \
                    pTrackerServer, NULL, file_content, file_size, \
                    master_file_id, prefix_name, file_ext_name, \
                    meta_list, meta_count, file_id);
                free(file_content);
            }
        }
        else
        {
            struct stat stat_buf;

            prefix_name = "_small";
            printf("storage_upload_slave_by_callback\n");
            if (stat(local_filename, &stat_buf) == 0 && \
                S_ISREG(stat_buf.st_mode))
            {
                file_size = stat_buf.st_size;
                result = storage_upload_slave_by_callback1( \
                    pTrackerServer, NULL, \
                    uploadFileCallback, local_filename, \
                    file_size, master_file_id, \
                    prefix_name, file_ext_name, \
                    meta_list, meta_count, file_id);
            }
        }

        if (result != 0)
        {
            printf("upload slave file fail, " \
                "error no: %d, error info: %s\n", \
                result, STRERROR(result));
            tracker_close_connection_ex(pStorageServer, true);
            fdfs_client_destroy();
            return result;
        }

        if (g_tracker_server_http_port == 80)
        {
            *szPortPart = '\0';
        }
        else
        {
            sprintf(szPortPart, ":%d", g_tracker_server_http_port);
        }
        url_len = sprintf(file_url, "http://%s%s/%s", \
            pStorageServer->ip_addr, szPortPart, file_id);
        if (g_anti_steal_token)
        {
            ts = time(NULL);
            fdfs_http_gen_token(&g_anti_steal_secret_key, \
                file_id, ts, token);
            sprintf(file_url + url_len, "?token=%s&ts=%d", \
                token, (int)ts);
        }

        fdfs_get_file_info1(file_id, &file_info);
        printf("source ip address: %s\n", file_info.source_ip_addr);
        printf("file timestamp=%s\n", formatDatetime(
            file_info.create_timestamp, "%Y-%m-%d %H:%M:%S", \
            szDatetime, sizeof(szDatetime)));
        printf("file size=%"PRId64"\n", file_info.file_size);
        printf("file crc32=%u\n", file_info.crc32);
        printf("example file url: %s\n", file_url);

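        /*
         * fdfs_gen_slave_filename() derives the expected slave file_id
         * locally from master_file_id, prefix_name and the extension; the
         * check below verifies it matches the file_id the storage server
         * actually returned.
         */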
        if (fdfs_gen_slave_filename(master_file_id, \
            prefix_name, file_ext_name, \
            slave_file_id, &slave_file_id_len) == 0)
        {
            if (strcmp(file_id, slave_file_id) != 0)
            {
                printf("slave_file_id=%s\n" \
                    "file_id=%s\n" \
                    "not equal!\n", \
                    slave_file_id, file_id);
            }
        }
    }
    else if (strcmp(operation, "download") == 0 ||
        strcmp(operation, "getmeta") == 0 ||
        strcmp(operation, "setmeta") == 0 ||
        strcmp(operation, "query_servers") == 0 ||
        strcmp(operation, "delete") == 0)
    {
        if (argc < 4)
        {
            printf("Usage: %s <config_file> %s " \
                "<file_id>\n", \
                argv[0], operation);
            fdfs_client_destroy();
            return EINVAL;
        }

        snprintf(file_id, sizeof(file_id), "%s", argv[3]);
        if (strcmp(operation, "query_servers") == 0)
        {
            ConnectionInfo storageServers[FDFS_MAX_SERVERS_EACH_GROUP];
            int server_count;

            result = tracker_query_storage_list1(pTrackerServer, \
                storageServers, FDFS_MAX_SERVERS_EACH_GROUP, \
                &server_count, file_id);

            if (result != 0)
            {
                printf("tracker_query_storage_list1 fail, "\
                    "file_id=%s, " \
                    "error no: %d, error info: %s\n", \
                    file_id, result, STRERROR(result));
            }
            else
            {
                printf("server list (%d):\n", server_count);
                for (i=0; i<server_count; i++)
                {
                    printf("\t%s:%d\n", \
                        storageServers[i].ip_addr, \
                        storageServers[i].port);
                }
                printf("\n");
            }

            tracker_close_connection_ex(pTrackerServer, true);
            fdfs_client_destroy();
            return result;
        }

        if ((result=tracker_query_storage_fetch1(pTrackerServer, \
            &storageServer, file_id)) != 0)
        {
            fdfs_client_destroy();
            printf("tracker_query_storage_fetch fail, " \
                "file_id=%s, " \
                "error no: %d, error info: %s\n", \
                file_id, result, STRERROR(result));
            return result;
        }

        printf("storage=%s:%d\n", storageServer.ip_addr, \
            storageServer.port);

        if ((pStorageServer=tracker_make_connection(&storageServer, \
            &result)) == NULL)
        {
            fdfs_client_destroy();
            return result;
        }

if (strcmp(operation, "download") == 0)
|
||||||
|
{
|
||||||
|
if (argc >= 5)
|
||||||
|
{
|
||||||
|
local_filename = argv[4];
|
||||||
|
if (strcmp(local_filename, "CALLBACK") == 0)
|
||||||
|
{
|
||||||
|
FILE *fp;
|
||||||
|
fp = fopen(local_filename, "wb");
|
||||||
|
if (fp == NULL)
|
||||||
|
{
|
||||||
|
result = errno != 0 ? errno : EPERM;
|
||||||
|
printf("open file \"%s\" fail, " \
|
||||||
|
"errno: %d, error info: %s", \
|
||||||
|
local_filename, result, \
|
||||||
|
STRERROR(result));
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
result = storage_download_file_ex1( \
|
||||||
|
pTrackerServer, pStorageServer, \
|
||||||
|
file_id, 0, 0, \
|
||||||
|
writeToFileCallback, fp, &file_size);
|
||||||
|
fclose(fp);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
result = storage_download_file_to_file1( \
|
||||||
|
pTrackerServer, pStorageServer, \
|
||||||
|
file_id, \
|
||||||
|
local_filename, &file_size);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
file_buff = NULL;
|
||||||
|
if ((result=storage_download_file_to_buff1( \
|
||||||
|
pTrackerServer, pStorageServer, \
|
||||||
|
file_id, \
|
||||||
|
&file_buff, &file_size)) == 0)
|
||||||
|
{
|
||||||
|
local_filename = strrchr( \
|
||||||
|
file_id, '/');
|
||||||
|
if (local_filename != NULL)
|
||||||
|
{
|
||||||
|
local_filename++; //skip /
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
local_filename=file_id;
|
||||||
|
}
|
||||||
|
|
||||||
|
result = writeToFile(local_filename, \
|
||||||
|
file_buff, file_size);
|
||||||
|
|
||||||
|
free(file_buff);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (result == 0)
|
||||||
|
{
|
||||||
|
printf("download file success, " \
|
||||||
|
"file size=%"PRId64", file save to %s\n", \
|
||||||
|
file_size, local_filename);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
printf("download file fail, " \
|
||||||
|
"error no: %d, error info: %s\n", \
|
||||||
|
result, STRERROR(result));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else if (strcmp(operation, "getmeta") == 0)
|
||||||
|
{
|
||||||
|
if ((result=storage_get_metadata1(pTrackerServer, \
|
||||||
|
NULL, file_id, \
|
||||||
|
&pMetaList, &meta_count)) == 0)
|
||||||
|
{
|
||||||
|
printf("get meta data success, " \
|
||||||
|
"meta count=%d\n", meta_count);
|
||||||
|
for (i=0; i<meta_count; i++)
|
||||||
|
{
|
||||||
|
printf("%s=%s\n", \
|
||||||
|
pMetaList[i].name, \
|
||||||
|
pMetaList[i].value);
|
||||||
|
}
|
||||||
|
|
||||||
|
free(pMetaList);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
printf("getmeta fail, " \
|
||||||
|
"error no: %d, error info: %s\n", \
|
||||||
|
result, STRERROR(result));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else if (strcmp(operation, "setmeta") == 0)
|
||||||
|
{
|
||||||
|
if (argc < 6)
|
||||||
|
{
|
||||||
|
printf("Usage: %s <config_file> %s " \
|
||||||
|
"<file_id> " \
|
||||||
|
"<op_flag> <metadata_list>\n" \
|
||||||
|
"\top_flag: %c for overwrite, " \
|
||||||
|
"%c for merge\n" \
|
||||||
|
"\tmetadata_list: name1=value1," \
|
||||||
|
"name2=value2,...\n", \
|
||||||
|
argv[0], operation, \
|
||||||
|
STORAGE_SET_METADATA_FLAG_OVERWRITE, \
|
||||||
|
STORAGE_SET_METADATA_FLAG_MERGE);
|
||||||
|
fdfs_client_destroy();
|
||||||
|
return EINVAL;
|
||||||
|
}
|
||||||
|
|
||||||
|
meta_buff = strdup(argv[5]);
|
||||||
|
if (meta_buff == NULL)
|
||||||
|
{
|
||||||
|
printf("Out of memory!\n");
|
||||||
|
return ENOMEM;
|
||||||
|
}
|
||||||
|
|
||||||
|
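            /*
             * fdfs_split_metadata_ex() parses "name1=value1,name2=value2,..."
             * and, judging by the strdup above and the two free() calls
             * below, splits the copied buffer in place, so meta_buff must
             * stay allocated for as long as pMetaList is used.
             */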
            pMetaList = fdfs_split_metadata_ex(meta_buff, \
                ',', '=', &meta_count, &result);
            if (pMetaList == NULL)
            {
                printf("Out of memory!\n");
                free(meta_buff);
                return ENOMEM;
            }

            if ((result=storage_set_metadata1(pTrackerServer, \
                NULL, file_id, \
                pMetaList, meta_count, *argv[4])) == 0)
            {
                printf("set meta data success\n");
            }
            else
            {
                printf("setmeta fail, " \
                    "error no: %d, error info: %s\n", \
                    result, STRERROR(result));
            }

            free(meta_buff);
            free(pMetaList);
        }
        else if(strcmp(operation, "delete") == 0)
        {
            if ((result=storage_delete_file1(pTrackerServer, \
                NULL, file_id)) == 0)
            {
                printf("delete file success\n");
            }
            else
            {
                printf("delete file fail, " \
                    "error no: %d, error info: %s\n", \
                    result, STRERROR(result));
            }
        }
    }
    else
    {
        fdfs_client_destroy();
        printf("invalid operation: %s\n", operation);
        return EINVAL;
    }

    /* for test only */
    if ((result=fdfs_active_test(pTrackerServer)) != 0)
    {
        printf("active_test to tracker server %s:%d fail, errno: %d\n", \
            pTrackerServer->ip_addr, pTrackerServer->port, result);
    }

    /* for test only */
    if ((result=fdfs_active_test(pStorageServer)) != 0)
    {
        printf("active_test to storage server %s:%d fail, errno: %d\n", \
            pStorageServer->ip_addr, pStorageServer->port, result);
    }

    tracker_close_connection_ex(pStorageServer, true);
    tracker_close_connection_ex(pTrackerServer, true);

    fdfs_client_destroy();

    return result;
}

@@ -249,7 +249,6 @@ int tracker_list_servers(ConnectionInfo *pTrackerServer, \
 {
     char out_buff[sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN + \
         IP_ADDRESS_SIZE];
-    char formatted_ip[FORMATTED_IP_SIZE];
     bool new_connection;
     TrackerHeader *pHeader;
     ConnectionInfo *conn;
@@ -294,16 +293,17 @@ int tracker_list_servers(ConnectionInfo *pTrackerServer, \
 
     long2buff(FDFS_GROUP_NAME_MAX_LEN + id_len, pHeader->pkg_len);
     pHeader->cmd = TRACKER_PROTO_CMD_SERVER_LIST_STORAGE;
-    if ((result=tcpsenddata_nb(conn->sock, out_buff,
-        sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN + id_len,
-        SF_G_NETWORK_TIMEOUT)) != 0)
+    if ((result=tcpsenddata_nb(conn->sock, out_buff, \
+        sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN + id_len, \
+        g_fdfs_network_timeout)) != 0)
     {
-        format_ip_address(pTrackerServer->ip_addr, formatted_ip);
-        logError("file: "__FILE__", line: %d, "
-            "send data to tracker server %s:%u fail, errno: %d, "
-            "error info: %s", __LINE__, formatted_ip,
-            pTrackerServer->port, result, STRERROR(result));
+        logError("file: "__FILE__", line: %d, " \
+            "send data to tracker server %s:%d fail, " \
+            "errno: %d, error info: %s", __LINE__, \
+            pTrackerServer->ip_addr, \
+            pTrackerServer->port, \
+            result, STRERROR(result));
     }
     else
     {
         pInBuff = (char *)stats;
@@ -330,10 +330,10 @@ int tracker_list_servers(ConnectionInfo *pTrackerServer, \
 
     if (in_bytes % sizeof(TrackerStorageStat) != 0)
     {
-        format_ip_address(pTrackerServer->ip_addr, formatted_ip);
-        logError("file: "__FILE__", line: %d, "
-            "tracker server %s:%u response data length: %"PRId64
-            " is invalid", __LINE__, formatted_ip,
+        logError("file: "__FILE__", line: %d, " \
+            "tracker server %s:%d response data " \
+            "length: %"PRId64" is invalid", \
+            __LINE__, pTrackerServer->ip_addr, \
             pTrackerServer->port, in_bytes);
         *storage_count = 0;
         return EINVAL;
@@ -342,12 +342,11 @@ int tracker_list_servers(ConnectionInfo *pTrackerServer, \
     *storage_count = in_bytes / sizeof(TrackerStorageStat);
     if (*storage_count > max_storages)
     {
-        format_ip_address(pTrackerServer->ip_addr, formatted_ip);
-        logError("file: "__FILE__", line: %d, "
-            "tracker server %s:%u insufficent space, "
-            "max storage count: %d, expect count: %d",
-            __LINE__, formatted_ip, pTrackerServer->port,
-            max_storages, *storage_count);
+        logError("file: "__FILE__", line: %d, " \
+            "tracker server %s:%d insufficent space, " \
+            "max storage count: %d, expect count: %d", \
+            __LINE__, pTrackerServer->ip_addr, \
+            pTrackerServer->port, max_storages, *storage_count);
         *storage_count = 0;
         return ENOSPC;
     }
@@ -485,7 +484,6 @@ int tracker_list_one_group(ConnectionInfo *pTrackerServer, \
     ConnectionInfo *conn;
     bool new_connection;
     char out_buff[sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN];
-    char formatted_ip[FORMATTED_IP_SIZE];
     TrackerGroupStat src;
     char *pInBuff;
     int result;
@@ -499,14 +497,15 @@ int tracker_list_one_group(ConnectionInfo *pTrackerServer, \
         sizeof(TrackerHeader), "%s", group_name);
     pHeader->cmd = TRACKER_PROTO_CMD_SERVER_LIST_ONE_GROUP;
     long2buff(FDFS_GROUP_NAME_MAX_LEN, pHeader->pkg_len);
-    if ((result=tcpsenddata_nb(conn->sock, out_buff,
-        sizeof(out_buff), SF_G_NETWORK_TIMEOUT)) != 0)
+    if ((result=tcpsenddata_nb(conn->sock, out_buff, \
+        sizeof(out_buff), g_fdfs_network_timeout)) != 0)
     {
-        format_ip_address(pTrackerServer->ip_addr, formatted_ip);
-        logError("file: "__FILE__", line: %d, "
-            "send data to tracker server %s:%u fail, errno: %d, "
-            "error info: %s", __LINE__, formatted_ip,
-            pTrackerServer->port, result, STRERROR(result));
+        logError("file: "__FILE__", line: %d, " \
+            "send data to tracker server %s:%d fail, " \
+            "errno: %d, error info: %s", __LINE__, \
+            pTrackerServer->ip_addr, \
+            pTrackerServer->port, \
+            result, STRERROR(result));
     }
     else
     {
@@ -533,10 +532,10 @@ int tracker_list_one_group(ConnectionInfo *pTrackerServer, \
 
     if (in_bytes != sizeof(TrackerGroupStat))
     {
-        format_ip_address(pTrackerServer->ip_addr, formatted_ip);
-        logError("file: "__FILE__", line: %d, "
-            "tracker server %s:%u response data length: %"PRId64" "
-            "is invalid", __LINE__, formatted_ip,
+        logError("file: "__FILE__", line: %d, " \
+            "tracker server %s:%d response data " \
+            "length: %"PRId64" is invalid", \
+            __LINE__, pTrackerServer->ip_addr, \
             pTrackerServer->port, in_bytes);
         return EINVAL;
     }
@@ -570,7 +569,6 @@ int tracker_list_groups(ConnectionInfo *pTrackerServer, \
     TrackerGroupStat *pSrc;
     TrackerGroupStat *pEnd;
     FDFSGroupStat *pDest;
-    char formatted_ip[FORMATTED_IP_SIZE];
     int result;
     int64_t in_bytes;
 
@@ -579,14 +577,15 @@ int tracker_list_groups(ConnectionInfo *pTrackerServer, \
     memset(&header, 0, sizeof(header));
     header.cmd = TRACKER_PROTO_CMD_SERVER_LIST_ALL_GROUPS;
     header.status = 0;
-    if ((result=tcpsenddata_nb(conn->sock, &header,
-        sizeof(header), SF_G_NETWORK_TIMEOUT)) != 0)
+    if ((result=tcpsenddata_nb(conn->sock, &header, \
+        sizeof(header), g_fdfs_network_timeout)) != 0)
     {
-        format_ip_address(pTrackerServer->ip_addr, formatted_ip);
-        logError("file: "__FILE__", line: %d, "
-            "send data to tracker server %s:%u fail, errno: %d, "
-            "error info: %s", __LINE__, formatted_ip,
-            pTrackerServer->port, result, STRERROR(result));
+        logError("file: "__FILE__", line: %d, " \
+            "send data to tracker server %s:%d fail, " \
+            "errno: %d, error info: %s", __LINE__, \
+            pTrackerServer->ip_addr, \
+            pTrackerServer->port, \
+            result, STRERROR(result));
     }
     else
     {
@@ -614,10 +613,10 @@ int tracker_list_groups(ConnectionInfo *pTrackerServer, \
 
     if (in_bytes % sizeof(TrackerGroupStat) != 0)
     {
-        format_ip_address(pTrackerServer->ip_addr, formatted_ip);
-        logError("file: "__FILE__", line: %d, "
-            "tracker server %s:%u response data length: %"PRId64" "
-            "is invalid", __LINE__, formatted_ip,
+        logError("file: "__FILE__", line: %d, " \
+            "tracker server %s:%d response data " \
+            "length: %"PRId64" is invalid", \
+            __LINE__, pTrackerServer->ip_addr, \
             pTrackerServer->port, in_bytes);
         *group_count = 0;
         return EINVAL;
@@ -626,12 +625,11 @@ int tracker_list_groups(ConnectionInfo *pTrackerServer, \
     *group_count = in_bytes / sizeof(TrackerGroupStat);
     if (*group_count > max_groups)
     {
-        format_ip_address(pTrackerServer->ip_addr, formatted_ip);
-        logError("file: "__FILE__", line: %d, "
-            "tracker server %s:%u insufficent space, "
-            "max group count: %d, expect count: %d",
-            __LINE__, formatted_ip, pTrackerServer->port,
-            max_groups, *group_count);
+        logError("file: "__FILE__", line: %d, " \
+            "tracker server %s:%d insufficent space, " \
+            "max group count: %d, expect count: %d", \
+            __LINE__, pTrackerServer->ip_addr, \
+            pTrackerServer->port, max_groups, *group_count);
         *group_count = 0;
         return ENOSPC;
     }
@@ -674,7 +672,6 @@ int tracker_do_query_storage(ConnectionInfo *pTrackerServer, \
     bool new_connection;
     char out_buff[sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN + 128];
     char in_buff[sizeof(TrackerHeader) + TRACKER_QUERY_STORAGE_FETCH_BODY_LEN];
-    char formatted_ip[FORMATTED_IP_SIZE];
     char *pInBuff;
     int64_t in_bytes;
     int result;
@@ -696,15 +693,16 @@ int tracker_do_query_storage(ConnectionInfo *pTrackerServer, \
 
     long2buff(FDFS_GROUP_NAME_MAX_LEN + filename_len, pHeader->pkg_len);
     pHeader->cmd = cmd;
-    if ((result=tcpsenddata_nb(conn->sock, out_buff,
+    if ((result=tcpsenddata_nb(conn->sock, out_buff, \
         sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN +
-        filename_len, SF_G_NETWORK_TIMEOUT)) != 0)
+        filename_len, g_fdfs_network_timeout)) != 0)
     {
-        format_ip_address(pTrackerServer->ip_addr, formatted_ip);
-        logError("file: "__FILE__", line: %d, "
-            "send data to tracker server %s:%u fail, errno: %d, "
-            "error info: %s", __LINE__, formatted_ip,
-            pTrackerServer->port, result, STRERROR(result));
+        logError("file: "__FILE__", line: %d, " \
+            "send data to tracker server %s:%d fail, " \
+            "errno: %d, error info: %s", __LINE__, \
+            pTrackerServer->ip_addr, \
+            pTrackerServer->port, \
+            result, STRERROR(result));
     }
     else
     {
@@ -731,11 +729,12 @@ int tracker_do_query_storage(ConnectionInfo *pTrackerServer, \
 
     if (in_bytes != TRACKER_QUERY_STORAGE_FETCH_BODY_LEN)
     {
-        format_ip_address(pTrackerServer->ip_addr, formatted_ip);
-        logError("file: "__FILE__", line: %d, "
-            "tracker server %s:%u response data length: %"PRId64" "
-            "is invalid, expect length: %d", __LINE__,
-            formatted_ip, pTrackerServer->port, in_bytes,
+        logError("file: "__FILE__", line: %d, " \
+            "tracker server %s:%d response data " \
+            "length: %"PRId64" is invalid, " \
+            "expect length: %d", __LINE__, \
+            pTrackerServer->ip_addr, \
+            pTrackerServer->port, in_bytes, \
             TRACKER_QUERY_STORAGE_FETCH_BODY_LEN);
         return EINVAL;
     }
@@ -760,7 +759,6 @@ int tracker_query_storage_list(ConnectionInfo *pTrackerServer, \
     char in_buff[sizeof(TrackerHeader) + \
         TRACKER_QUERY_STORAGE_FETCH_BODY_LEN + \
         FDFS_MAX_SERVERS_EACH_GROUP * IP_ADDRESS_SIZE];
-    char formatted_ip[FORMATTED_IP_SIZE];
     char *pInBuff;
     int64_t in_bytes;
     int result;
@@ -781,13 +779,14 @@ int tracker_query_storage_list(ConnectionInfo *pTrackerServer, \
     pHeader->cmd = TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ALL;
     if ((result=tcpsenddata_nb(conn->sock, out_buff, \
         sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN +
-        filename_len, SF_G_NETWORK_TIMEOUT)) != 0)
+        filename_len, g_fdfs_network_timeout)) != 0)
     {
-        format_ip_address(pTrackerServer->ip_addr, formatted_ip);
-        logError("file: "__FILE__", line: %d, "
-            "send data to tracker server %s:%u fail, "
-            "errno: %d, error info: %s", __LINE__, formatted_ip,
-            pTrackerServer->port, result, STRERROR(result));
+        logError("file: "__FILE__", line: %d, " \
+            "send data to tracker server %s:%d fail, " \
+            "errno: %d, error info: %s", __LINE__, \
+            pTrackerServer->ip_addr, \
+            pTrackerServer->port, \
+            result, STRERROR(result));
     }
     else
     {
@@ -815,10 +814,10 @@ int tracker_query_storage_list(ConnectionInfo *pTrackerServer, \
     if ((in_bytes - TRACKER_QUERY_STORAGE_FETCH_BODY_LEN) % \
         (IP_ADDRESS_SIZE - 1) != 0)
     {
-        format_ip_address(pTrackerServer->ip_addr, formatted_ip);
-        logError("file: "__FILE__", line: %d, "
-            "tracker server %s:%u response data length: %"PRId64" "
-            "is invalid", __LINE__, formatted_ip,
+        logError("file: "__FILE__", line: %d, " \
+            "tracker server %s:%d response data " \
+            "length: %"PRId64" is invalid", \
+            __LINE__, pTrackerServer->ip_addr, \
             pTrackerServer->port, in_bytes);
         return EINVAL;
     }
@@ -827,11 +826,10 @@ int tracker_query_storage_list(ConnectionInfo *pTrackerServer, \
         (IP_ADDRESS_SIZE - 1);
     if (nMaxServerCount < *server_count)
     {
-        format_ip_address(pTrackerServer->ip_addr, formatted_ip);
-        logError("file: "__FILE__", line: %d, "
-            "tracker server %s:%u response storage server "
-            "count: %d, exceeds max server count: %d!", __LINE__,
-            formatted_ip, pTrackerServer->port,
+        logError("file: "__FILE__", line: %d, " \
+            "tracker server %s:%d response storage server " \
+            "count: %d, exceeds max server count: %d!", __LINE__, \
+            pTrackerServer->ip_addr, pTrackerServer->port, \
             *server_count, nMaxServerCount);
         return ENOSPC;
     }
@@ -866,7 +864,6 @@ int tracker_query_storage_store_without_group(ConnectionInfo *pTrackerServer,
     TrackerHeader header;
     char in_buff[sizeof(TrackerHeader) + \
         TRACKER_QUERY_STORAGE_STORE_BODY_LEN];
-    char formatted_ip[FORMATTED_IP_SIZE];
     bool new_connection;
     ConnectionInfo *conn;
     char *pInBuff;
@@ -881,13 +878,14 @@ int tracker_query_storage_store_without_group(ConnectionInfo *pTrackerServer,
     memset(&header, 0, sizeof(header));
     header.cmd = TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ONE;
     if ((result=tcpsenddata_nb(conn->sock, &header, \
-        sizeof(header), SF_G_NETWORK_TIMEOUT)) != 0)
+        sizeof(header), g_fdfs_network_timeout)) != 0)
     {
-        format_ip_address(pTrackerServer->ip_addr, formatted_ip);
-        logError("file: "__FILE__", line: %d, "
-            "send data to tracker server %s:%u fail, "
-            "errno: %d, error info: %s", __LINE__, formatted_ip,
-            pTrackerServer->port, result, STRERROR(result));
+        logError("file: "__FILE__", line: %d, " \
+            "send data to tracker server %s:%d fail, " \
+            "errno: %d, error info: %s", __LINE__, \
+            pTrackerServer->ip_addr, \
+            pTrackerServer->port, \
+            result, STRERROR(result));
     }
     else
     {
@@ -914,12 +912,12 @@ int tracker_query_storage_store_without_group(ConnectionInfo *pTrackerServer,
 
     if (in_bytes != TRACKER_QUERY_STORAGE_STORE_BODY_LEN)
     {
-        format_ip_address(pTrackerServer->ip_addr, formatted_ip);
-        logError("file: "__FILE__", line: %d, "
-            "tracker server %s:%u response data length: %"PRId64" "
-            "is invalid, expect length: %d", __LINE__,
-            formatted_ip, pTrackerServer->port, in_bytes,
-            TRACKER_QUERY_STORAGE_STORE_BODY_LEN);
+        logError("file: "__FILE__", line: %d, " \
+            "tracker server %s:%d response data " \
+            "length: %"PRId64" is invalid, " \
+            "expect length: %d", __LINE__, \
+            pTrackerServer->ip_addr, pTrackerServer->port, \
+            in_bytes, TRACKER_QUERY_STORAGE_STORE_BODY_LEN);
         return EINVAL;
     }
 
@@ -945,7 +943,6 @@ int tracker_query_storage_store_with_group(ConnectionInfo *pTrackerServer, \
     char out_buff[sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN];
     char in_buff[sizeof(TrackerHeader) + \
         TRACKER_QUERY_STORAGE_STORE_BODY_LEN];
-    char formatted_ip[FORMATTED_IP_SIZE];
     char *pInBuff;
     int64_t in_bytes;
     int result;
@@ -962,15 +959,16 @@ int tracker_query_storage_store_with_group(ConnectionInfo *pTrackerServer, \
 
     long2buff(FDFS_GROUP_NAME_MAX_LEN, pHeader->pkg_len);
     pHeader->cmd = TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ONE;
-    if ((result=tcpsenddata_nb(conn->sock, out_buff,
-        sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN,
-        SF_G_NETWORK_TIMEOUT)) != 0)
+    if ((result=tcpsenddata_nb(conn->sock, out_buff, \
+        sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN, \
+        g_fdfs_network_timeout)) != 0)
     {
-        format_ip_address(pTrackerServer->ip_addr, formatted_ip);
-        logError("file: "__FILE__", line: %d, "
-            "send data to tracker server %s:%u fail, "
-            "errno: %d, error info: %s", __LINE__, formatted_ip,
-            pTrackerServer->port, result, STRERROR(result));
+        logError("file: "__FILE__", line: %d, " \
+            "send data to tracker server %s:%d fail, " \
+            "errno: %d, error info: %s", __LINE__, \
+            pTrackerServer->ip_addr, \
+            pTrackerServer->port, \
+            result, STRERROR(result));
     }
     else
     {
@@ -997,11 +995,11 @@ int tracker_query_storage_store_with_group(ConnectionInfo *pTrackerServer, \
 
     if (in_bytes != TRACKER_QUERY_STORAGE_STORE_BODY_LEN)
     {
-        format_ip_address(pTrackerServer->ip_addr, formatted_ip);
-        logError("file: "__FILE__", line: %d, "
-            "tracker server %s:%u response data "
-            "length: %"PRId64" is invalid, expect length: %d",
-            __LINE__, formatted_ip, pTrackerServer->port,
+        logError("file: "__FILE__", line: %d, " \
+            "tracker server %s:%d response data " \
+            "length: %"PRId64" is invalid, " \
+            "expect length: %d", __LINE__, \
+            pTrackerServer->ip_addr, pTrackerServer->port, \
             in_bytes, TRACKER_QUERY_STORAGE_STORE_BODY_LEN);
         return EINVAL;
     }
@@ -1029,7 +1027,6 @@ int tracker_query_storage_store_list_with_group( \
     char out_buff[sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN];
     char in_buff[sizeof(TrackerHeader) + FDFS_MAX_SERVERS_EACH_GROUP * \
         TRACKER_QUERY_STORAGE_STORE_BODY_LEN];
-    char formatted_ip[FORMATTED_IP_SIZE];
     char returned_group_name[FDFS_GROUP_NAME_MAX_LEN + 1];
     char *pInBuff;
     char *p;
@@ -1058,14 +1055,15 @@ int tracker_query_storage_store_list_with_group( \
     }
 
     long2buff(out_len, pHeader->pkg_len);
-    if ((result=tcpsenddata_nb(conn->sock, out_buff,
-        sizeof(TrackerHeader) + out_len, SF_G_NETWORK_TIMEOUT)) != 0)
+    if ((result=tcpsenddata_nb(conn->sock, out_buff, \
+        sizeof(TrackerHeader) + out_len, g_fdfs_network_timeout)) != 0)
     {
-        format_ip_address(pTrackerServer->ip_addr, formatted_ip);
-        logError("file: "__FILE__", line: %d, "
-            "send data to tracker server %s:%u fail, "
-            "errno: %d, error info: %s", __LINE__, formatted_ip,
-            pTrackerServer->port, result, STRERROR(result));
+        logError("file: "__FILE__", line: %d, " \
+            "send data to tracker server %s:%d fail, " \
+            "errno: %d, error info: %s", __LINE__, \
+            pTrackerServer->ip_addr, \
+            pTrackerServer->port, \
+            result, STRERROR(result));
     }
     else
     {
@@ -1092,11 +1090,11 @@ int tracker_query_storage_store_list_with_group( \
 
     if (in_bytes < TRACKER_QUERY_STORAGE_STORE_BODY_LEN)
     {
-        format_ip_address(pTrackerServer->ip_addr, formatted_ip);
-        logError("file: "__FILE__", line: %d, "
-            "tracker server %s:%u response data "
-            "length: %"PRId64" is invalid, expect length >= %d",
-            __LINE__, formatted_ip, pTrackerServer->port,
+        logError("file: "__FILE__", line: %d, " \
+            "tracker server %s:%d response data " \
+            "length: %"PRId64" is invalid, " \
+            "expect length >= %d", __LINE__, \
+            pTrackerServer->ip_addr, pTrackerServer->port, \
             in_bytes, TRACKER_QUERY_STORAGE_STORE_BODY_LEN);
         return EINVAL;
     }
@@ -1106,23 +1104,22 @@ int tracker_query_storage_store_list_with_group( \
     ipPortsLen = in_bytes - (FDFS_GROUP_NAME_MAX_LEN + 1);
     if (ipPortsLen % RECORD_LENGTH != 0)
     {
-        format_ip_address(pTrackerServer->ip_addr, formatted_ip);
-        logError("file: "__FILE__", line: %d, "
-            "tracker server %s:%u response data "
-            "length: %"PRId64" is invalid", __LINE__,
-            formatted_ip, pTrackerServer->port, in_bytes);
+        logError("file: "__FILE__", line: %d, " \
+            "tracker server %s:%d response data " \
+            "length: %"PRId64" is invalid", \
+            __LINE__, pTrackerServer->ip_addr, \
+            pTrackerServer->port, in_bytes);
         return EINVAL;
     }
 
     *storage_count = ipPortsLen / RECORD_LENGTH;
     if (nMaxServerCount < *storage_count)
     {
-        format_ip_address(pTrackerServer->ip_addr, formatted_ip);
-        logError("file: "__FILE__", line: %d, "
-            "tracker server %s:%u response storage server "
-            "count: %d, exceeds max server count: %d!",
-            __LINE__, formatted_ip, pTrackerServer->port,
-            *storage_count, nMaxServerCount);
+        logError("file: "__FILE__", line: %d, " \
+            "tracker server %s:%d response storage server " \
+            "count: %d, exceeds max server count: %d!", \
+            __LINE__, pTrackerServer->ip_addr, \
+            pTrackerServer->port, *storage_count, nMaxServerCount);
         return ENOSPC;
     }
 
@@ -1160,7 +1157,6 @@ int tracker_delete_storage(TrackerServerGroup *pTrackerGroup, \
     FDFSStorageInfo storage_infos[1];
     char out_buff[sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN + \
         FDFS_STORAGE_ID_MAX_SIZE];
-    char formatted_ip[FORMATTED_IP_SIZE];
     char in_buff[1];
     char *pInBuff;
    int64_t in_bytes;
@@ -1230,13 +1226,13 @@ int tracker_delete_storage(TrackerServerGroup *pTrackerGroup, \
 
     if ((result=tcpsenddata_nb(conn->sock, out_buff, \
         sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN +
-        storage_id_len, SF_G_NETWORK_TIMEOUT)) != 0)
+        storage_id_len, g_fdfs_network_timeout)) != 0)
     {
-        format_ip_address(conn->ip_addr, formatted_ip);
         logError("file: "__FILE__", line: %d, "
-            "send data to tracker server %s:%u fail, "
"send data to tracker server %s:%d fail, "
|
||||||
"errno: %d, error info: %s", __LINE__, formatted_ip,
|
"errno: %d, error info: %s", __LINE__,
|
||||||
conn->port, result, STRERROR(result));
|
conn->ip_addr, conn->port,
|
||||||
|
result, STRERROR(result));
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
|
|
@ -1284,7 +1280,6 @@ int tracker_delete_group(TrackerServerGroup *pTrackerGroup, \
|
||||||
TrackerServerInfo *pServer;
|
TrackerServerInfo *pServer;
|
||||||
TrackerServerInfo *pEnd;
|
TrackerServerInfo *pEnd;
|
||||||
char out_buff[sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN];
|
char out_buff[sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN];
|
||||||
char formatted_ip[FORMATTED_IP_SIZE];
|
|
||||||
char in_buff[1];
|
char in_buff[1];
|
||||||
char *pInBuff;
|
char *pInBuff;
|
||||||
int64_t in_bytes;
|
int64_t in_bytes;
|
||||||
|
|
@ -1311,13 +1306,13 @@ int tracker_delete_group(TrackerServerGroup *pTrackerGroup, \
|
||||||
|
|
||||||
if ((result=tcpsenddata_nb(conn->sock, out_buff,
|
if ((result=tcpsenddata_nb(conn->sock, out_buff,
|
||||||
sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN,
|
sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN,
|
||||||
SF_G_NETWORK_TIMEOUT)) != 0)
|
g_fdfs_network_timeout)) != 0)
|
||||||
{
|
{
|
||||||
format_ip_address(conn->ip_addr, formatted_ip);
|
|
||||||
logError("file: "__FILE__", line: %d, "
|
logError("file: "__FILE__", line: %d, "
|
||||||
"send data to tracker server %s:%u fail, "
|
"send data to tracker server %s:%d fail, "
|
||||||
"errno: %d, error info: %s", __LINE__, formatted_ip,
|
"errno: %d, error info: %s", __LINE__,
|
||||||
conn->port, result, STRERROR(result));
|
conn->ip_addr, conn->port,
|
||||||
|
result, STRERROR(result));
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1348,7 +1343,6 @@ int tracker_set_trunk_server(TrackerServerGroup *pTrackerGroup, \
|
||||||
char out_buff[sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN + \
|
char out_buff[sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN + \
|
||||||
FDFS_STORAGE_ID_MAX_SIZE];
|
FDFS_STORAGE_ID_MAX_SIZE];
|
||||||
char in_buff[FDFS_STORAGE_ID_MAX_SIZE];
|
char in_buff[FDFS_STORAGE_ID_MAX_SIZE];
|
||||||
char formatted_ip[FORMATTED_IP_SIZE];
|
|
||||||
char *pInBuff;
|
char *pInBuff;
|
||||||
int64_t in_bytes;
|
int64_t in_bytes;
|
||||||
int result;
|
int result;
|
||||||
|
|
@ -1386,15 +1380,15 @@ int tracker_set_trunk_server(TrackerServerGroup *pTrackerGroup, \
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((result=tcpsenddata_nb(conn->sock, out_buff,
|
if ((result=tcpsenddata_nb(conn->sock, out_buff, \
|
||||||
sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN +
|
sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN +
|
||||||
storage_id_len, SF_G_NETWORK_TIMEOUT)) != 0)
|
storage_id_len, g_fdfs_network_timeout)) != 0)
|
||||||
{
|
{
|
||||||
format_ip_address(conn->ip_addr, formatted_ip);
|
|
||||||
logError("file: "__FILE__", line: %d, "
|
logError("file: "__FILE__", line: %d, "
|
||||||
"send data to tracker server %s:%u fail, "
|
"send data to tracker server %s:%d fail, "
|
||||||
"errno: %d, error info: %s", __LINE__, formatted_ip,
|
"errno: %d, error info: %s", __LINE__,
|
||||||
conn->port, result, STRERROR(result));
|
conn->ip_addr, conn->port,
|
||||||
|
result, STRERROR(result));
|
||||||
|
|
||||||
tracker_close_connection_ex(conn, true);
|
tracker_close_connection_ex(conn, true);
|
||||||
continue;
|
continue;
|
||||||
|
|
@ -1444,7 +1438,6 @@ int tracker_get_storage_status(ConnectionInfo *pTrackerServer,
|
||||||
bool new_connection;
|
bool new_connection;
|
||||||
char out_buff[sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN + \
|
char out_buff[sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN + \
|
||||||
IP_ADDRESS_SIZE];
|
IP_ADDRESS_SIZE];
|
||||||
char formatted_ip[FORMATTED_IP_SIZE];
|
|
||||||
char *pInBuff;
|
char *pInBuff;
|
||||||
char *p;
|
char *p;
|
||||||
int result;
|
int result;
|
||||||
|
|
@ -1474,14 +1467,15 @@ int tracker_get_storage_status(ConnectionInfo *pTrackerServer,
|
||||||
}
|
}
|
||||||
pHeader->cmd = TRACKER_PROTO_CMD_STORAGE_GET_STATUS;
|
pHeader->cmd = TRACKER_PROTO_CMD_STORAGE_GET_STATUS;
|
||||||
long2buff(FDFS_GROUP_NAME_MAX_LEN + ip_len, pHeader->pkg_len);
|
long2buff(FDFS_GROUP_NAME_MAX_LEN + ip_len, pHeader->pkg_len);
|
||||||
if ((result=tcpsenddata_nb(conn->sock, out_buff,
|
if ((result=tcpsenddata_nb(conn->sock, out_buff, \
|
||||||
p - out_buff, SF_G_NETWORK_TIMEOUT)) != 0)
|
p - out_buff, g_fdfs_network_timeout)) != 0)
|
||||||
{
|
{
|
||||||
format_ip_address(pTrackerServer->ip_addr, formatted_ip);
|
logError("file: "__FILE__", line: %d, " \
|
||||||
logError("file: "__FILE__", line: %d, "
|
"send data to tracker server %s:%d fail, " \
|
||||||
"send data to tracker server %s:%u fail, "
|
"errno: %d, error info: %s", __LINE__, \
|
||||||
"errno: %d, error info: %s", __LINE__, formatted_ip,
|
pTrackerServer->ip_addr, \
|
||||||
pTrackerServer->port, result, STRERROR(result));
|
pTrackerServer->port, \
|
||||||
|
result, STRERROR(result));
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
|
|
@ -1508,11 +1502,11 @@ int tracker_get_storage_status(ConnectionInfo *pTrackerServer,
|
||||||
|
|
||||||
if (in_bytes != sizeof(FDFSStorageBrief))
|
if (in_bytes != sizeof(FDFSStorageBrief))
|
||||||
{
|
{
|
||||||
format_ip_address(pTrackerServer->ip_addr, formatted_ip);
|
logError("file: "__FILE__", line: %d, " \
|
||||||
logError("file: "__FILE__", line: %d, "
|
"tracker server %s:%d response data " \
|
||||||
"tracker server %s:%u response data "
|
"length: %"PRId64" is invalid", \
|
||||||
"length: %"PRId64" is invalid", __LINE__,
|
__LINE__, pTrackerServer->ip_addr, \
|
||||||
formatted_ip, pTrackerServer->port, in_bytes);
|
pTrackerServer->port, in_bytes);
|
||||||
return EINVAL;
|
return EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1528,7 +1522,6 @@ int tracker_get_storage_id(ConnectionInfo *pTrackerServer, \
|
||||||
bool new_connection;
|
bool new_connection;
|
||||||
char out_buff[sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN + \
|
char out_buff[sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN + \
|
||||||
IP_ADDRESS_SIZE];
|
IP_ADDRESS_SIZE];
|
||||||
char formatted_ip[FORMATTED_IP_SIZE];
|
|
||||||
char *p;
|
char *p;
|
||||||
int result;
|
int result;
|
||||||
int ip_len;
|
int ip_len;
|
||||||
|
|
@ -1562,14 +1555,15 @@ int tracker_get_storage_id(ConnectionInfo *pTrackerServer, \
|
||||||
}
|
}
|
||||||
pHeader->cmd = TRACKER_PROTO_CMD_STORAGE_GET_SERVER_ID;
|
pHeader->cmd = TRACKER_PROTO_CMD_STORAGE_GET_SERVER_ID;
|
||||||
long2buff(FDFS_GROUP_NAME_MAX_LEN + ip_len, pHeader->pkg_len);
|
long2buff(FDFS_GROUP_NAME_MAX_LEN + ip_len, pHeader->pkg_len);
|
||||||
if ((result=tcpsenddata_nb(conn->sock, out_buff,
|
if ((result=tcpsenddata_nb(conn->sock, out_buff, \
|
||||||
p - out_buff, SF_G_NETWORK_TIMEOUT)) != 0)
|
p - out_buff, g_fdfs_network_timeout)) != 0)
|
||||||
{
|
{
|
||||||
format_ip_address(pTrackerServer->ip_addr, formatted_ip);
|
logError("file: "__FILE__", line: %d, " \
|
||||||
logError("file: "__FILE__", line: %d, "
|
"send data to tracker server %s:%d fail, " \
|
||||||
"send data to tracker server %s:%u fail, "
|
"errno: %d, error info: %s", __LINE__, \
|
||||||
"errno: %d, error info: %s", __LINE__, formatted_ip,
|
pTrackerServer->ip_addr, \
|
||||||
pTrackerServer->port, result, STRERROR(result));
|
pTrackerServer->port, \
|
||||||
|
result, STRERROR(result));
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
|
|
@ -1595,10 +1589,10 @@ int tracker_get_storage_id(ConnectionInfo *pTrackerServer, \
|
||||||
|
|
||||||
if (in_bytes == 0 || in_bytes >= FDFS_STORAGE_ID_MAX_SIZE)
|
if (in_bytes == 0 || in_bytes >= FDFS_STORAGE_ID_MAX_SIZE)
|
||||||
{
|
{
|
||||||
format_ip_address(pTrackerServer->ip_addr, formatted_ip);
|
logError("file: "__FILE__", line: %d, " \
|
||||||
logError("file: "__FILE__", line: %d, "
|
"tracker server %s:%d response data " \
|
||||||
"tracker server %s:%u response data length: %"PRId64" "
|
"length: %"PRId64" is invalid", \
|
||||||
"is invalid", __LINE__, formatted_ip,
|
__LINE__, pTrackerServer->ip_addr, \
|
||||||
pTrackerServer->port, in_bytes);
|
pTrackerServer->port, in_bytes);
|
||||||
return EINVAL;
|
return EINVAL;
|
||||||
}
|
}
|
||||||
|
|
@ -1660,3 +1654,4 @@ int tracker_get_storage_max_status(TrackerServerGroup *pTrackerGroup, \
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
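Read as a whole, the left-hand (master) side of the hunks above swaps direct logging of pTrackerServer->ip_addr / conn->ip_addr for a pre-formatted address buffer plus an unsigned port, so IPv6 peers come out square-bracketed. The standalone sketch below only imitates that pattern: format_ip() is a hypothetical stand-in for libfastcommon's format_ip_address(), and SKETCH_FORMATTED_IP_SIZE is an assumed size, not the real FORMATTED_IP_SIZE.

/* Sketch: log "server %s:%u" from a pre-formatted address buffer.
 * format_ip() is a local stand-in for libfastcommon's format_ip_address();
 * it merely wraps IPv6 literals in square brackets, matching what the
 * master-side log lines suggest. The buffer size is an assumption. */
#include <stdio.h>
#include <string.h>

#define SKETCH_FORMATTED_IP_SIZE 64   /* assumed, not FastDFS's FORMATTED_IP_SIZE */

static void format_ip(const char *ip_addr, char *formatted_ip)
{
    if (strchr(ip_addr, ':') != NULL)  /* crude IPv6 test, good enough for a demo */
    {
        snprintf(formatted_ip, SKETCH_FORMATTED_IP_SIZE, "[%s]", ip_addr);
    }
    else
    {
        snprintf(formatted_ip, SKETCH_FORMATTED_IP_SIZE, "%s", ip_addr);
    }
}

int main(void)
{
    char formatted_ip[SKETCH_FORMATTED_IP_SIZE];
    unsigned int port = 22122;

    format_ip("2409:8a20:42d:2f40::1", formatted_ip);
    fprintf(stderr, "send data to tracker server %s:%u fail\n", formatted_ip, port);

    format_ip("192.168.0.196", formatted_ip);
    fprintf(stderr, "send data to tracker server %s:%u fail\n", formatted_ip, port);
    return 0;
}

On the V6.05 side the same messages print the raw address with %s:%d, which is why bracketed IPv6 output only appears on master.
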
@@ -26,8 +26,8 @@ typedef struct
 char src_id[FDFS_STORAGE_ID_MAX_SIZE];   //src storage id
 char domain_name[FDFS_DOMAIN_NAME_MAX_SIZE]; //http domain name
 char version[FDFS_VERSION_SIZE];
-int64_t total_mb;  //total disk storage in MB
-int64_t free_mb;  //free disk storage in MB
+int total_mb;  //total disk storage in MB
+int free_mb;  //free disk storage in MB
 int upload_priority;  //upload priority
 time_t join_time;  //storage join timestamp (create timestamp)
 time_t up_time;    //storage service started timestamp

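Widening total_mb and free_mb to int64_t matters because a 32-bit int can only count up to INT_MAX megabytes, roughly 2 PB per field; the small self-contained check below works that limit out (the 4 PB figure is just an illustrative value, not taken from the repository).

/* Sketch: why a 32-bit int is too small for total_mb / free_mb.
 * INT_MAX megabytes is roughly 2 PB; a 4 PB capacity, expressed in MB,
 * no longer fits in an int. */
#include <inttypes.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t four_pb_in_mb = 4LL * 1024 * 1024 * 1024;   /* 4 PB expressed in MB */

    printf("INT_MAX in MB: %d (about %.1f PB)\n",
            INT_MAX, INT_MAX / (1024.0 * 1024.0 * 1024.0));
    printf("4 PB in MB as int64_t: %" PRId64 "\n", four_pb_in_mb);
    printf("does 4 PB fit in an int? %s\n",
            four_pb_in_mb <= INT_MAX ? "yes" : "no");
    return 0;
}
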
@@ -20,11 +20,13 @@
 #include "fastcommon/logger.h"
 #include "fdfs_global.h"

-Version g_fdfs_version = {6, 12, 2};
+int g_fdfs_connect_timeout = DEFAULT_CONNECT_TIMEOUT;
+int g_fdfs_network_timeout = DEFAULT_NETWORK_TIMEOUT;
+char g_fdfs_base_path[MAX_PATH_SIZE] = {'/', 't', 'm', 'p', '\0'};
+Version g_fdfs_version = {6, 5};
 bool g_use_connection_pool = false;
 ConnectionPool g_connection_pool;
 int g_connection_pool_max_idle_time = 3600;
-struct base64_context g_fdfs_base64_context;

 /*
 data filename format:

@@ -12,10 +12,8 @@
 #define _FDFS_GLOBAL_H

 #include "fastcommon/common_define.h"
-#include "fastcommon/base64.h"
-#include "fastcommon/connection_pool.h"
-#include "sf/sf_global.h"
 #include "fdfs_define.h"
+#include "fastcommon/connection_pool.h"

 #define FDFS_FILE_EXT_NAME_MAX_LEN  6

@@ -23,11 +21,13 @@
 extern "C" {
 #endif

+extern int g_fdfs_connect_timeout;
+extern int g_fdfs_network_timeout;
+extern char g_fdfs_base_path[MAX_PATH_SIZE];
 extern Version g_fdfs_version;
 extern bool g_use_connection_pool;
 extern ConnectionPool g_connection_pool;
 extern int g_connection_pool_max_idle_time;
-extern struct base64_context g_fdfs_base64_context;

 int fdfs_check_data_filename(const char *filename, const int len);
 int fdfs_gen_slave_filename(const char *master_filename, \

@@ -72,7 +72,7 @@ int fdfs_http_get_content_type_by_extname(FDFSHTTPParams *pParams, \
     return 0;
 }

-pHashData = fc_hash_find_ex(&pParams->content_type_hash, \
+pHashData = hash_find_ex(&pParams->content_type_hash, \
     ext_name, ext_len + 1);
 if (pHashData == NULL)
 {

@@ -282,7 +282,7 @@ int fdfs_http_params_load(IniContext *pIniContext, \

 if (!(pParams->need_find_content_type || pParams->support_multi_range))
 {
-    fc_hash_destroy(&pParams->content_type_hash);
+    hash_destroy(&pParams->content_type_hash);
 }

 if ((result=getFileContent(token_check_fail_filename, \

@@ -301,7 +301,7 @@ void fdfs_http_params_destroy(FDFSHTTPParams *pParams)
 {
 if (!(pParams->need_find_content_type || pParams->support_multi_range))
 {
-    fc_hash_destroy(&pParams->content_type_hash);
+    hash_destroy(&pParams->content_type_hash);
 }
 }

@@ -69,11 +69,11 @@ int load_mime_types_from_file(HashArray *pHash, const char *mime_filename)
     }
 }

-if ((result=fc_hash_init_ex(pHash, PJWHash, 2 * 1024, 0.75, 0, true)) != 0)
+if ((result=hash_init_ex(pHash, PJWHash, 2 * 1024, 0.75, 0, true)) != 0)
 {
     free(content);
     logError("file: "__FILE__", line: %d, " \
-        "fc_hash_init_ex fail, errno: %d, error info: %s", \
+        "hash_init_ex fail, errno: %d, error info: %s", \
         __LINE__, result, STRERROR(result));
     return result;
 }

@@ -108,14 +108,14 @@ int load_mime_types_from_file(HashArray *pHash, const char *mime_filename)
     continue;
 }

-if ((result=fc_hash_insert_ex(pHash, ext_name, \
+if ((result=hash_insert_ex(pHash, ext_name, \
     strlen(ext_name)+1, content_type, \
     strlen(content_type)+1, true)) < 0)
 {
     free(content);
     result *= -1;
     logError("file: "__FILE__", line: %d, " \
-        "fc_hash_insert_ex fail, errno: %d, " \
+        "hash_insert_ex fail, errno: %d, " \
         "error info: %s", __LINE__, \
         result, STRERROR(result));
     return result;

@@ -125,7 +125,7 @@ int load_mime_types_from_file(HashArray *pHash, const char *mime_filename)

 free(content);

-//fc_hash_stat_print(pHash);
+//hash_stat_print(pHash);
 return 0;
 }

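The only change in the hunks above is the fc_ prefix that the newer libfastcommon naming uses on the master side. The fragment below strings the renamed calls together in the same shapes they appear above; it is a sketch that assumes a libfastcommon build whose fastcommon/hash.h exports the fc_-prefixed functions (link with -lfastcommon), and the extension/content-type strings are made up for illustration.

/* Sketch: the fc_-prefixed hash calls used on the master side, in the same
 * shapes shown in the hunks above. Assumes a libfastcommon build that
 * exports fc_hash_init_ex / fc_hash_insert_ex / fc_hash_find_ex /
 * fc_hash_destroy; the key/value strings are illustrative only. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include "fastcommon/hash.h"

int main(void)
{
    HashArray content_type_hash;
    void *pHashData;
    int result;

    if ((result=fc_hash_init_ex(&content_type_hash, PJWHash,
            2 * 1024, 0.75, 0, true)) != 0)
    {
        fprintf(stderr, "fc_hash_init_ex fail, errno: %d\n", result);
        return result;
    }

    if ((result=fc_hash_insert_ex(&content_type_hash, "html",
            strlen("html") + 1, "text/html",
            strlen("text/html") + 1, true)) < 0)
    {
        fprintf(stderr, "fc_hash_insert_ex fail, errno: %d\n", -1 * result);
        fc_hash_destroy(&content_type_hash);
        return -1 * result;
    }

    pHashData = fc_hash_find_ex(&content_type_hash, "html", strlen("html") + 1);
    printf("lookup of \"html\": %s\n", pHashData != NULL ? "hit" : "miss");

    fc_hash_destroy(&content_type_hash);
    return 0;
}
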
@@ -8,7 +8,7 @@ connect_timeout = 5
 network_timeout = 60

 # the base path to store log files
-base_path = /opt/fastdfs
+base_path = /home/yuqing/fastdfs

 # tracker_server can ocur more than once for multi tracker servers.
 # the value format of tracker_server is "HOST:PORT",

@@ -16,13 +16,9 @@ base_path = /opt/fastdfs
 # and the HOST can be dual IPs or hostnames seperated by comma,
 # the dual IPS must be an inner (intranet) IP and an outer (extranet) IP,
 # or two different types of inner (intranet) IPs.
-# IPv4:
 # for example: 192.168.2.100,122.244.141.46:22122
 # another eg.: 192.168.1.10,172.17.4.21:22122
-#
-# IPv6:
-# for example: [2409:8a20:42d:2f40:587a:4c47:72c0:ad8e,fe80::1ee9:90a8:1351:436c]:22122
-#
 tracker_server = 192.168.0.196:22122
 tracker_server = 192.168.0.197:22122

@@ -37,13 +33,6 @@ tracker_server = 192.168.0.197:22122
 ### debug
 log_level = info

-# connect which ip address first for multi IPs of a storage server, value list:
-## tracker: connect to the ip address return by tracker server first
-## last-connected: connect to the ip address last connected first
-# default value is tracker
-# since V6.11
-connect_first_by = tracker
-
 # if use connection pool
 # default value is false
 # since V4.05

@@ -12,15 +12,6 @@ group_name = group1

 # bind an address of this host
 # empty for bind all addresses of this host
-#
-# bind IPv4 example: 192.168.2.100
-#
-# bind IPv6 example: 2409:8a20:42d:2f40:587a:4c47:72c0:ad8e
-#
-# bind IPv4 and IPv6 example: 192.168.2.100,2409:8a20:42d:2f40:587a:4c47:72c0:ad8e
-#
-# as any/all addresses, IPv4 is 0.0.0.0, IPv6 is ::
-#
 bind_addr =

 # if bind an address of this host when connect to other servers

@@ -32,27 +23,6 @@ client_bind = true
 # the storage server port
 port = 23000

-# the address family of service, value list:
-## IPv4: IPv4 stack
-## IPv6: IPv6 stack
-## auto: auto detect by bind_addr, IPv4 first then IPv6 when bind_addr is empty
-## both: IPv4 and IPv6 dual stacks
-# default value is auto
-# since V6.11
-address_family = auto
-
-
-# specify the storage server ID for NAT network
-# NOT set or commented for auto set by the local ip addresses
-# since V6.11
-#
-# NOTE:
-## * this paramter is valid only when use_storage_id and trust_storage_server_id
-##   in tracker.conf set to true
-## * the storage server id must exist in storage_ids.conf
-#server_id =
-
-
 # connect timeout in seconds
 # default value is 30
 # Note: in the intranet network (LAN), 2 seconds is enough.

@@ -76,7 +46,7 @@ stat_report_interval = 60
 # NOTE: the binlog files maybe are large, make sure
 # the base path has enough disk space,
 # eg. the disk free space should > 50GB
-base_path = /opt/fastdfs
+base_path = /home/yuqing/fastdfs

 # max concurrent connections the server supported,
 # you should set this parameter larger, eg. 10240

@@ -156,8 +126,8 @@ store_path_count = 1
 # the store paths' order is very important, don't mess up!!!
 # the base_path should be independent (different) of the store paths

-store_path0 = /opt/fastdfs
-#store_path1 = /opt/fastdfs2
+store_path0 = /home/yuqing/fastdfs
+#store_path1 = /home/yuqing/fastdfs2

 # subdir_count * subdir_count directories will be auto created under each
 # store_path (disk), value can be 1 to 256, default value is 256

@@ -169,13 +139,9 @@ subdir_count_per_path = 256
 # and the HOST can be dual IPs or hostnames seperated by comma,
 # the dual IPS must be an inner (intranet) IP and an outer (extranet) IP,
 # or two different types of inner (intranet) IPs.
-# IPv4:
 # for example: 192.168.2.100,122.244.141.46:22122
 # another eg.: 192.168.1.10,172.17.4.21:22122
-#
-# IPv6:
-# for example: [2409:8a20:42d:2f40:587a:4c47:72c0:ad8e,fe80::1ee9:90a8:1351:436c]:22122
-#
 tracker_server = 192.168.209.121:22122
 tracker_server = 192.168.209.122:22122

@@ -6,17 +6,11 @@
 # storage ip or hostname can be dual IPs seperated by comma,
 # one is an inner (intranet) IP and another is an outer (extranet) IP,
 # or two different types of inner (intranet) IPs
-# IPv4:
 # for example: 192.168.2.100,122.244.141.46
 # another eg.: 192.168.1.10,172.17.4.21
 #
-# IPv6:
-# or example: [2409:8a20:42d:2f40:587a:4c47:72c0:ad8e,fe80::1ee9:90a8:1351:436c]
-# another eg.: [2409:8a20:42d:2f40:587a:4c47:72c0:ad8e,fe80::1ee9:90a8:1351:436c]:100002
-#
 # the port is optional. if you run more than one storaged instances
 # in a server, you must specified the port to distinguish different instances.

 100001   group1  192.168.0.196
 100002   group1  192.168.0.197
-100003   group1  [2409:8a20:42d:2f40:587a:4c47:72c0:ad8e]:100002

@@ -5,33 +5,11 @@ disabled = false

 # bind an address of this host
 # empty for bind all addresses of this host
-#
-# bind IPv4 example: 192.168.2.100
-#
-# bind IPv6 example: 2409:8a20:42d:2f40:587a:4c47:72c0:ad8e
-#
-# bind IPv4 and IPv6 example: 192.168.2.100,2409:8a20:42d:2f40:587a:4c47:72c0:ad8e
-#
-# as any/all addresses, IPv4 is 0.0.0.0, IPv6 is ::
-#
 bind_addr =

 # the tracker server port
 port = 22122

-# the address family of service, value list:
-## IPv4: IPv4 stack
-## IPv6: IPv6 stack
-## auto: auto detect by bind_addr, IPv4 first then IPv6 when bind_addr is empty
-## both: IPv4 and IPv6 dual stacks
-#
-# following parameter use_storage_id MUST set to true and
-# id_type_in_filename MUST set to id when IPv6 enabled
-#
-# default value is auto
-# since V6.11
-address_family = auto
-
 # connect timeout in seconds
 # default value is 30
 # Note: in the intranet network (LAN), 2 seconds is enough.

@@ -42,7 +20,7 @@ connect_timeout = 5
 network_timeout = 60

 # the base path to store data and log files
-base_path = /opt/fastdfs
+base_path = /home/yuqing/fastdfs

 # max concurrent connections this server support
 # you should set this parameter larger, eg. 10240

@@ -103,12 +81,7 @@ download_server = 0
 ### M or m for megabyte(MB)
 ### K or k for kilobyte(KB)
 ### no unit for byte(B)
-#
 ### XX.XX% as ratio such as: reserved_storage_space = 10%
-#
-# NOTE:
-## the absolute reserved space is the sum of all store paths in the storage server
-## the reserved space ratio is for each store path
 reserved_storage_space = 20%

 #standard log level as syslog, case insensitive, value list:

@@ -287,13 +260,6 @@ storage_ids_filename = storage_ids.conf
 # since V4.03
 id_type_in_filename = id

-# if trust the storage server ID sent by the storage server
-# this paramter is valid only when use_storage_id set to true
-# default value is true
-# since V6.11
-trust_storage_server_id = true
-
-
 # if store slave file use symbol link
 # default value is false
 # since V4.01

@@ -1,5 +0,0 @@
-FastDFS is an open source high performance distributed file system. Its major
-functions include: file storing, file syncing and file accessing (file uploading
-and file downloading), and it can resolve the high capacity and load balancing
-problem. FastDFS should meet the requirement of the website whose service based
-on files such as photo sharing site and video sharing site.

@@ -1,7 +0,0 @@
-fastdfs (6.12.1-1) stable; urgency=medium
-
-  * adapt to libserverframe 1.2.3
-  * bugfixed: notify_leader_changed support IPv6 correctly
-  * log square quoted IPv6 address
-
- -- YuQing <384681@qq.com>  Wed, 6 Mar 2024 15:14:27 +0000

@@ -1 +0,0 @@
-11

@@ -1,56 +0,0 @@
-Source: fastdfs
-Section: admin
-Priority: optional
-Maintainer: YuQing <384681@qq.com>
-Build-Depends: debhelper (>=11~),
-               libfastcommon-dev (>= 1.0.73),
-               libserverframe-dev (>= 1.2.3)
-Standards-Version: 4.1.4
-Homepage: http://github.com/happyfish100/fastdfs/
-
-Package: fastdfs
-Architecture: linux-any
-Multi-Arch: foreign
-Depends: fastdfs-server (= ${binary:Version}),
-         fastdfs-tool (= ${binary:Version}),
-         ${misc:Depends}
-Description: FastDFS server and client
-
-Package: fastdfs-server
-Architecture: linux-any
-Multi-Arch: foreign
-Depends: libfastcommon (>= ${libfastcommon:Version}),
-         libserverframe (>= ${libserverframe:Version}),
-         fastdfs-config (>= ${fastdfs-config:Version}),
-         ${misc:Depends}, ${shlibs:Depends}
-Description: FastDFS server
-
-Package: libfdfsclient
-Architecture: linux-any
-Multi-Arch: foreign
-Depends: libfastcommon (>= ${libfastcommon:Version}),
-         libserverframe (>= ${libserverframe:Version}),
-         ${misc:Depends}, ${shlibs:Depends}
-Description: FastDFS client tools
-
-Package: libfdfsclient-dev
-Architecture: linux-any
-Multi-Arch: foreign
-Depends: libfdfsclient (= ${binary:Version}),
-         ${misc:Depends}
-Description: header files of FastDFS client library
- This package provides the header files of libfdfsclient
-
-Package: fastdfs-tool
-Architecture: linux-any
-Multi-Arch: foreign
-Depends: libfdfsclient (= ${binary:Version}),
-         fastdfs-config (>= ${fastdfs-config:Version}),
-         ${misc:Depends}, ${shlibs:Depends}
-Description: FastDFS client tools
-
-Package: fastdfs-config
-Architecture: linux-any
-Multi-Arch: foreign
-Description: FastDFS config files for sample
- FastDFS config files for sample including server and client

@@ -1,675 +0,0 @@
-                    GNU GENERAL PUBLIC LICENSE
-                       Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.

[remainder of this hunk: the rest of the unmodified GNU General Public License, version 3, removed in full]
|
|
||||||
covered work, you indicate your acceptance of this License to do so.
|
|
||||||
|
|
||||||
10. Automatic Licensing of Downstream Recipients.
|
|
||||||
|
|
||||||
Each time you convey a covered work, the recipient automatically
|
|
||||||
receives a license from the original licensors, to run, modify and
|
|
||||||
propagate that work, subject to this License. You are not responsible
|
|
||||||
for enforcing compliance by third parties with this License.
|
|
||||||
|
|
||||||
An "entity transaction" is a transaction transferring control of an
|
|
||||||
organization, or substantially all assets of one, or subdividing an
|
|
||||||
organization, or merging organizations. If propagation of a covered
|
|
||||||
work results from an entity transaction, each party to that
|
|
||||||
transaction who receives a copy of the work also receives whatever
|
|
||||||
licenses to the work the party's predecessor in interest had or could
|
|
||||||
give under the previous paragraph, plus a right to possession of the
|
|
||||||
Corresponding Source of the work from the predecessor in interest, if
|
|
||||||
the predecessor has it or can get it with reasonable efforts.
|
|
||||||
|
|
||||||
You may not impose any further restrictions on the exercise of the
|
|
||||||
rights granted or affirmed under this License. For example, you may
|
|
||||||
not impose a license fee, royalty, or other charge for exercise of
|
|
||||||
rights granted under this License, and you may not initiate litigation
|
|
||||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
|
||||||
any patent claim is infringed by making, using, selling, offering for
|
|
||||||
sale, or importing the Program or any portion of it.
|
|
||||||
|
|
||||||
11. Patents.
|
|
||||||
|
|
||||||
A "contributor" is a copyright holder who authorizes use under this
|
|
||||||
License of the Program or a work on which the Program is based. The
|
|
||||||
work thus licensed is called the contributor's "contributor version".
|
|
||||||
|
|
||||||
A contributor's "essential patent claims" are all patent claims
|
|
||||||
owned or controlled by the contributor, whether already acquired or
|
|
||||||
hereafter acquired, that would be infringed by some manner, permitted
|
|
||||||
by this License, of making, using, or selling its contributor version,
|
|
||||||
but do not include claims that would be infringed only as a
|
|
||||||
consequence of further modification of the contributor version. For
|
|
||||||
purposes of this definition, "control" includes the right to grant
|
|
||||||
patent sublicenses in a manner consistent with the requirements of
|
|
||||||
this License.
|
|
||||||
|
|
||||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
|
||||||
patent license under the contributor's essential patent claims, to
|
|
||||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
|
||||||
propagate the contents of its contributor version.
|
|
||||||
|
|
||||||
In the following three paragraphs, a "patent license" is any express
|
|
||||||
agreement or commitment, however denominated, not to enforce a patent
|
|
||||||
(such as an express permission to practice a patent or covenant not to
|
|
||||||
sue for patent infringement). To "grant" such a patent license to a
|
|
||||||
party means to make such an agreement or commitment not to enforce a
|
|
||||||
patent against the party.
|
|
||||||
|
|
||||||
If you convey a covered work, knowingly relying on a patent license,
|
|
||||||
and the Corresponding Source of the work is not available for anyone
|
|
||||||
to copy, free of charge and under the terms of this License, through a
|
|
||||||
publicly available network server or other readily accessible means,
|
|
||||||
then you must either (1) cause the Corresponding Source to be so
|
|
||||||
available, or (2) arrange to deprive yourself of the benefit of the
|
|
||||||
patent license for this particular work, or (3) arrange, in a manner
|
|
||||||
consistent with the requirements of this License, to extend the patent
|
|
||||||
license to downstream recipients. "Knowingly relying" means you have
|
|
||||||
actual knowledge that, but for the patent license, your conveying the
|
|
||||||
covered work in a country, or your recipient's use of the covered work
|
|
||||||
in a country, would infringe one or more identifiable patents in that
|
|
||||||
country that you have reason to believe are valid.
|
|
||||||
|
|
||||||
If, pursuant to or in connection with a single transaction or
|
|
||||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
|
||||||
covered work, and grant a patent license to some of the parties
|
|
||||||
receiving the covered work authorizing them to use, propagate, modify
|
|
||||||
or convey a specific copy of the covered work, then the patent license
|
|
||||||
you grant is automatically extended to all recipients of the covered
|
|
||||||
work and works based on it.
|
|
||||||
|
|
||||||
A patent license is "discriminatory" if it does not include within
|
|
||||||
the scope of its coverage, prohibits the exercise of, or is
|
|
||||||
conditioned on the non-exercise of one or more of the rights that are
|
|
||||||
specifically granted under this License. You may not convey a covered
|
|
||||||
work if you are a party to an arrangement with a third party that is
|
|
||||||
in the business of distributing software, under which you make payment
|
|
||||||
to the third party based on the extent of your activity of conveying
|
|
||||||
the work, and under which the third party grants, to any of the
|
|
||||||
parties who would receive the covered work from you, a discriminatory
|
|
||||||
patent license (a) in connection with copies of the covered work
|
|
||||||
conveyed by you (or copies made from those copies), or (b) primarily
|
|
||||||
for and in connection with specific products or compilations that
|
|
||||||
contain the covered work, unless you entered into that arrangement,
|
|
||||||
or that patent license was granted, prior to 28 March 2007.
|
|
||||||
|
|
||||||
Nothing in this License shall be construed as excluding or limiting
|
|
||||||
any implied license or other defenses to infringement that may
|
|
||||||
otherwise be available to you under applicable patent law.
|
|
||||||
|
|
||||||
12. No Surrender of Others' Freedom.
|
|
||||||
|
|
||||||
If conditions are imposed on you (whether by court order, agreement or
|
|
||||||
otherwise) that contradict the conditions of this License, they do not
|
|
||||||
excuse you from the conditions of this License. If you cannot convey a
|
|
||||||
covered work so as to satisfy simultaneously your obligations under this
|
|
||||||
License and any other pertinent obligations, then as a consequence you may
|
|
||||||
not convey it at all. For example, if you agree to terms that obligate you
|
|
||||||
to collect a royalty for further conveying from those to whom you convey
|
|
||||||
the Program, the only way you could satisfy both those terms and this
|
|
||||||
License would be to refrain entirely from conveying the Program.
|
|
||||||
|
|
||||||
13. Use with the GNU Affero General Public License.
|
|
||||||
|
|
||||||
Notwithstanding any other provision of this License, you have
|
|
||||||
permission to link or combine any covered work with a work licensed
|
|
||||||
under version 3 of the GNU Affero General Public License into a single
|
|
||||||
combined work, and to convey the resulting work. The terms of this
|
|
||||||
License will continue to apply to the part which is the covered work,
|
|
||||||
but the special requirements of the GNU Affero General Public License,
|
|
||||||
section 13, concerning interaction through a network will apply to the
|
|
||||||
combination as such.
|
|
||||||
|
|
||||||
14. Revised Versions of this License.
|
|
||||||
|
|
||||||
The Free Software Foundation may publish revised and/or new versions of
|
|
||||||
the GNU General Public License from time to time. Such new versions will
|
|
||||||
be similar in spirit to the present version, but may differ in detail to
|
|
||||||
address new problems or concerns.
|
|
||||||
|
|
||||||
Each version is given a distinguishing version number. If the
|
|
||||||
Program specifies that a certain numbered version of the GNU General
|
|
||||||
Public License "or any later version" applies to it, you have the
|
|
||||||
option of following the terms and conditions either of that numbered
|
|
||||||
version or of any later version published by the Free Software
|
|
||||||
Foundation. If the Program does not specify a version number of the
|
|
||||||
GNU General Public License, you may choose any version ever published
|
|
||||||
by the Free Software Foundation.
|
|
||||||
|
|
||||||
If the Program specifies that a proxy can decide which future
|
|
||||||
versions of the GNU General Public License can be used, that proxy's
|
|
||||||
public statement of acceptance of a version permanently authorizes you
|
|
||||||
to choose that version for the Program.
|
|
||||||
|
|
||||||
Later license versions may give you additional or different
|
|
||||||
permissions. However, no additional obligations are imposed on any
|
|
||||||
author or copyright holder as a result of your choosing to follow a
|
|
||||||
later version.
|
|
||||||
|
|
||||||
15. Disclaimer of Warranty.
|
|
||||||
|
|
||||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
|
||||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
|
||||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
|
||||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
|
||||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
||||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
|
||||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
|
||||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
|
||||||
|
|
||||||
16. Limitation of Liability.
|
|
||||||
|
|
||||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
|
||||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
|
||||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
|
||||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
|
||||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
|
||||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
|
||||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
|
||||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
|
||||||
SUCH DAMAGES.
|
|
||||||
|
|
||||||
17. Interpretation of Sections 15 and 16.
|
|
||||||
|
|
||||||
If the disclaimer of warranty and limitation of liability provided
|
|
||||||
above cannot be given local legal effect according to their terms,
|
|
||||||
reviewing courts shall apply local law that most closely approximates
|
|
||||||
an absolute waiver of all civil liability in connection with the
|
|
||||||
Program, unless a warranty or assumption of liability accompanies a
|
|
||||||
copy of the Program in return for a fee.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
How to Apply These Terms to Your New Programs
|
|
||||||
|
|
||||||
If you develop a new program, and you want it to be of the greatest
|
|
||||||
possible use to the public, the best way to achieve this is to make it
|
|
||||||
free software which everyone can redistribute and change under these terms.
|
|
||||||
|
|
||||||
To do so, attach the following notices to the program. It is safest
|
|
||||||
to attach them to the start of each source file to most effectively
|
|
||||||
state the exclusion of warranty; and each file should have at least
|
|
||||||
the "copyright" line and a pointer to where the full notice is found.
|
|
||||||
|
|
||||||
<one line to give the program's name and a brief idea of what it does.>
|
|
||||||
Copyright (C) <year> <name of author>
|
|
||||||
|
|
||||||
This program is free software: you can redistribute it and/or modify
|
|
||||||
it under the terms of the GNU General Public License as published by
|
|
||||||
the Free Software Foundation, either version 3 of the License, or
|
|
||||||
(at your option) any later version.
|
|
||||||
|
|
||||||
This program is distributed in the hope that it will be useful,
|
|
||||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
GNU General Public License for more details.
|
|
||||||
|
|
||||||
You should have received a copy of the GNU General Public License
|
|
||||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
Also add information on how to contact you by electronic and paper mail.
|
|
||||||
|
|
||||||
If the program does terminal interaction, make it output a short
|
|
||||||
notice like this when it starts in an interactive mode:
|
|
||||||
|
|
||||||
<program> Copyright (C) <year> <name of author>
|
|
||||||
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
|
||||||
This is free software, and you are welcome to redistribute it
|
|
||||||
under certain conditions; type `show c' for details.
|
|
||||||
|
|
||||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
|
||||||
parts of the General Public License. Of course, your program's commands
|
|
||||||
might be different; for a GUI interface, you would use an "about box".
|
|
||||||
|
|
||||||
You should also get your employer (if you work as a programmer) or school,
|
|
||||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
|
||||||
For more information on this, and how to apply and follow the GNU GPL, see
|
|
||||||
<http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
The GNU General Public License does not permit incorporating your program
|
|
||||||
into proprietary programs. If your program is a subroutine library, you
|
|
||||||
may consider it more useful to permit linking proprietary applications with
|
|
||||||
the library. If this is what you want to do, use the GNU Lesser General
|
|
||||||
Public License instead of this License. But first, please read
|
|
||||||
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
|
|
||||||
|
|
||||||
|
|
@ -1 +0,0 @@
etc/fdfs/*.conf

@ -1 +0,0 @@
opt/fastdfs

@ -1,2 +0,0 @@
usr/bin/fdfs_trackerd
usr/bin/fdfs_storaged

@ -1 +0,0 @@
opt/fastdfs

@ -1,13 +0,0 @@
usr/bin/fdfs_monitor
usr/bin/fdfs_test
usr/bin/fdfs_test1
usr/bin/fdfs_crc32
usr/bin/fdfs_upload_file
usr/bin/fdfs_download_file
usr/bin/fdfs_delete_file
usr/bin/fdfs_file_info
usr/bin/fdfs_appender_test
usr/bin/fdfs_appender_test1
usr/bin/fdfs_append_file
usr/bin/fdfs_upload_appender
usr/bin/fdfs_regenerate_filename

@ -1 +0,0 @@
usr/include/fastdfs/*

@ -1 +0,0 @@
usr/lib/libfdfsclient*

@ -1,30 +0,0 @@
#!/usr/bin/make -f

export DH_VERBOSE=1
export DESTDIR=$(CURDIR)/debian/tmp

export CONFDIR=$(DESTDIR)/etc/fdfs/

%:
	dh $@

override_dh_auto_build:
	./make.sh clean && DESTDIR=$(DESTDIR) ./make.sh

override_dh_auto_install:
	DESTDIR=$(DESTDIR) ./make.sh install
	mkdir -p $(CONFDIR)
	cp conf/*.conf $(CONFDIR)
	cp systemd/fdfs_storaged.service debian/fastdfs-server.fdfs_storaged.service
	cp systemd/fdfs_trackerd.service debian/fastdfs-server.fdfs_trackerd.service

	dh_auto_install

override_dh_installsystemd:
	dh_installsystemd --package=fastdfs-server --name=fdfs_storaged --no-start --no-restart-on-upgrade
	dh_installsystemd --package=fastdfs-server --name=fdfs_trackerd --no-start --no-restart-on-upgrade

.PHONY: override_dh_gencontrol
override_dh_gencontrol:
	dh_gencontrol -- -Tdebian/substvars

@ -1 +0,0 @@
3.0 (quilt)

@ -1,3 +0,0 @@
libfastcommon:Version=1.0.73
libserverframe:Version=1.2.3
fastdfs-config:Version=1.0.0

@ -1,3 +0,0 @@
version=3
opts="mode=git" https://github.com/happyfish100/fastdfs.git \
refs/tags/v([\d\.]+) debian uupdate
@ -1,22 +0,0 @@
# FastDFS Dockerfile local (build from local packages)

Thanks to YuQing for this great piece of work!

This directory contains the Docker image build files and a cluster installation guide.

1. Directory layout
./build_image-v6.0.9    builds the Docker image for fastdfs v6.0.9

./fastdfs-conf          configuration files (in fact the same files as the ones under build_image_v.x)
    |--setting_conf.sh  script that sets up the configuration files

./自定义镜像和安装手册.txt   (custom image and installation manual)

./qa.txt                Q&A collected from the BBS forum: http://bbs.chinaunix.net/forum-240-1.html


2. Installation changes between fastdfs versions

+ v6.0.9 depends on libevent, libfastcommon and libserverframe; v6.0.8 and earlier depend only on libevent and libfastcommon, where libfastcommon is provided by the FastDFS project itself.

+ v6.0.9 works with fastdfs-nginx-module-1.23 (and later); v6.0.8 and earlier use fastdfs-nginx-module-1.22.
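The README above only describes the directory layout; building the image itself follows the usual docker workflow. A minimal sketch, assuming the build is run from the build_image-v6.0.9 directory and the tag fastdfs:6.0.9 is chosen (the tag is illustrative, not defined in this repository):

    # the build context must contain the Dockerfile plus the soft/ packages
    # and conf files that the Dockerfile ADDs/COPYs
    cd build_image-v6.0.9
    docker build -t fastdfs:6.0.9 .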
@ -1,74 +0,0 @@
# Choose a system image as the base image; the very small Linux image alpine can be used
#FROM centos:7
FROM alpine:3.12

LABEL MAINTAINER liyanjing 284223249@qq.com

# 0. Location of the install packages, and the fdfs base and storage directories
ENV INSTALL_PATH=/usr/local/src \
    LIBFASTCOMMON_VERSION="1.0.57" \
    FASTDFS_VERSION="6.08" \
    FASTDFS_NGINX_MODULE_VERSION="1.22" \
    NGINX_VERSION="1.22.0" \
    TENGINE_VERSION="2.3.3"

# 0. change the system source for installing libs
RUN echo "http://mirrors.aliyun.com/alpine/v3.12/main" > /etc/apk/repositories \
    && echo "http://mirrors.aliyun.com/alpine/v3.12/community" >> /etc/apk/repositories

# 1. Copy the install packages
ADD soft ${INSTALL_PATH}

# 2. Environment setup
#    - create the fdfs storage directories
#    - install dependencies
#    - install libfastcommon
#    - install fastdfs
#    - install nginx, set up the nginx + fastdfs integration and configure nginx
#Run yum -y install -y gcc gcc-c++ libevent libevent-devel make automake autoconf libtool perl pcre pcre-devel zlib zlib-devel openssl openssl-devel zip unzip net-tools wget vim lsof \
RUN apk update && apk add --no-cache --virtual .build-deps bash autoconf gcc libc-dev make pcre-dev zlib-dev linux-headers gnupg libxslt-dev gd-dev geoip-dev wget \
    && cd ${INSTALL_PATH} \
    && tar -zxf libfastcommon-${LIBFASTCOMMON_VERSION}.tar.gz \
    && tar -zxf fastdfs-${FASTDFS_VERSION}.tar.gz \
    && tar -zxf fastdfs-nginx-module-${FASTDFS_NGINX_MODULE_VERSION}.tar.gz \
    && tar -zxf nginx-${NGINX_VERSION}.tar.gz \
    \
    && cd ${INSTALL_PATH}/libfastcommon-${LIBFASTCOMMON_VERSION}/ \
    && ./make.sh \
    && ./make.sh install \
    && cd ${INSTALL_PATH}/fastdfs-${FASTDFS_VERSION}/ \
    && ./make.sh \
    && ./make.sh install \
    \
    && cd ${INSTALL_PATH}/nginx-${NGINX_VERSION}/ \
    && ./configure --prefix=/usr/local/nginx --pid-path=/var/run/nginx/nginx.pid --with-http_stub_status_module --with-http_gzip_static_module --with-http_realip_module --with-http_sub_module --with-stream=dynamic \
       --add-module=${INSTALL_PATH}/fastdfs-nginx-module-${FASTDFS_NGINX_MODULE_VERSION}/src/ \
    && make \
    && make install \
    \
    && rm -rf ${INSTALL_PATH}/* \
    && apk del .build-deps gcc libc-dev make linux-headers gnupg libxslt-dev gd-dev geoip-dev wget

# 3. Add configuration files; when the target path ends with /, docker treats it as a
#    directory and creates it automatically if it does not exist
COPY conf/*.* /etc/fdfs/
COPY nginx_conf/nginx.conf /usr/local/nginx/conf/
COPY nginx_conf.d/*.conf /usr/local/nginx/conf.d/
COPY start.sh /

ENV TZ=Asia/Shanghai

# 4. Make the startup script executable and set the timezone to China time
RUN chmod u+x /start.sh \
    && apk add --no-cache bash pcre-dev zlib-dev \
    \
    && apk add -U tzdata \
    && ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone \
    && apk del tzdata && rm -rf /var/cache/apk/*

EXPOSE 22122 23000 9088

WORKDIR /

# Container entrypoint
ENTRYPOINT ["/bin/bash","/start.sh"]
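A hedged run sketch for an image built from this Dockerfile (the image and container names and the host data directory are illustrative; the ports are the ones EXPOSEd above, and /data/fastdfs_data is the base_path used by the bundled configuration files):

    docker run -d --name fastdfs \
        -p 22122:22122 -p 23000:23000 -p 9088:9088 \
        -v /data/fastdfs_data:/data/fastdfs_data \
        fastdfs:6.0.9
    # the ENTRYPOINT runs /start.sh (copied into / above), which is responsible
    # for starting the services inside the container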
@ -1,71 +0,0 @@
# connect timeout in seconds
# default value is 30s
# Note: in the intranet network (LAN), 2 seconds is enough.
connect_timeout = 5

# network timeout in seconds
# default value is 30s
network_timeout = 60

# the base path to store log files
base_path = /data/fastdfs_data

# tracker_server can occur more than once for multi tracker servers.
# the value format of tracker_server is "HOST:PORT",
# the HOST can be hostname or ip address,
# and the HOST can be dual IPs or hostnames separated by comma,
# the dual IPs must be an inner (intranet) IP and an outer (extranet) IP,
# or two different types of inner (intranet) IPs.
# for example: 192.168.2.100,122.244.141.46:22122
# another eg.: 192.168.1.10,172.17.4.21:22122

tracker_server = 192.168.0.196:22122
tracker_server = 192.168.0.197:22122

#standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level = info

# if use connection pool
# default value is false
# since V4.05
use_connection_pool = false

# connections whose idle time exceeds this time will be closed
# unit: second
# default value is 3600
# since V4.05
connection_pool_max_idle_time = 3600

# if load FastDFS parameters from tracker server
# since V4.05
# default value is false
load_fdfs_parameters_from_tracker = false

# if use storage ID instead of IP address
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# default value is false
# since V4.05
use_storage_id = false

# specify storage ids filename, can use relative or absolute path
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# since V4.05
storage_ids_filename = storage_ids.conf


#HTTP settings
http.tracker_server_port = 80

#use "#include" directive to include other HTTP settings
##include http.conf
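With a client.conf like the one above, the command line tools packaged earlier in this diff (fdfs_upload_file, fdfs_file_info, fdfs_download_file) take the config file as their first argument. A hedged usage sketch; the local file names and the returned file id are illustrative:

    fdfs_upload_file /etc/fdfs/client.conf /tmp/test.jpg
    # prints a file id such as group1/M00/00/00/xxxxxx.jpg
    fdfs_file_info /etc/fdfs/client.conf group1/M00/00/00/xxxxxx.jpg
    fdfs_download_file /etc/fdfs/client.conf group1/M00/00/00/xxxxxx.jpg /tmp/downloaded.jpg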
@ -1,29 +0,0 @@
# HTTP default content type
http.default_content_type = application/octet-stream

# MIME types mapping filename
# MIME types file format: MIME_type extensions
# such as: image/jpeg jpeg jpg jpe
# you can use apache's MIME file: mime.types
http.mime_types_filename = mime.types

# if use token for anti-steal
# default value is false (0)
http.anti_steal.check_token = false

# token TTL (time to live), seconds
# default value is 600
http.anti_steal.token_ttl = 900

# secret key to generate anti-steal token
# this parameter must be set when http.anti_steal.check_token is set to true
# the length of the secret key should not exceed 128 bytes
http.anti_steal.secret_key = FastDFS1234567890

# return the content of this file when the token check fails
# default value is empty (no file specified)
http.anti_steal.token_check_fail = /home/yuqing/fastdfs/conf/anti-steal.jpg

# if support multi regions for HTTP Range
# default value is true
http.multi_range.enabed = true
File diff suppressed because it is too large
@ -1,137 +0,0 @@
# connect timeout in seconds
# default value is 30s
connect_timeout=15

# network recv and send timeout in seconds
# default value is 30s
network_timeout=30

# the base path to store log files
base_path=/data/fastdfs_data

# if load FastDFS parameters from tracker server
# since V1.12
# default value is false
load_fdfs_parameters_from_tracker=true

# storage sync file max delay seconds
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# since V1.12
# default value is 86400 seconds (one day)
storage_sync_file_max_delay = 86400

# if use storage ID instead of IP address
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# default value is false
# since V1.13
use_storage_id = false

# specify storage ids filename, can use relative or absolute path
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# since V1.13
storage_ids_filename = storage_ids.conf

# FastDFS tracker_server can occur more than once, and tracker_server format is
# "host:port", host can be hostname or ip address
# valid only when load_fdfs_parameters_from_tracker is true
tracker_server = 192.168.209.121:22122
tracker_server = 192.168.209.122:22122

# the port of the local storage server
# the default value is 23000
storage_server_port=23000

# the group name of the local storage server
group_name=group1

# if the url / uri includes the group name
# set to false when uri like /M00/00/00/xxx
# set to true when uri like ${group_name}/M00/00/00/xxx, such as group1/M00/xxx
# default value is false
url_have_group_name = true

# path (disk or mount point) count, default value is 1
# must be the same as storage.conf
store_path_count=1

# store_path#, based on 0, if store_path0 does not exist, its value is base_path
# the paths must exist
# must be the same as storage.conf
store_path0=/data/fastdfs/upload/path0
#store_path1=/home/yuqing/fastdfs1

# standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level=info

# set the log filename, such as /usr/local/apache2/logs/mod_fastdfs.log
# empty for output to stderr (apache and nginx error_log file)
log_filename=

# response mode when the file does not exist in the local file system
## proxy: get the content from the other storage server, then send to client
## redirect: redirect to the original storage server (HTTP Header is Location)
response_mode=proxy

# the NIC alias prefix, such as eth in Linux, you can see it by ifconfig -a
# multi aliases split by comma. empty value means auto set by OS type
# this parameter is used to get all ip addresses of the local host
# default value is empty
if_alias_prefix=

# use "#include" directive to include HTTP config file
# NOTE: #include is an include directive, do NOT remove the # before include
#include http.conf


# if support flv
# default value is false
# since v1.15
flv_support = true

# flv file extension name
# default value is flv
# since v1.15
flv_extension = flv


## if this storage server supports multiple groups, set the number of groups here; use 0 for a single group.
## there is no need to run storages of multiple groups on one server, since a storage itself supports multiple store paths
# set the group count
# set to non-zero to support multi-group on this storage server
# set to 0 for single group only
# group settings sections as [group1], [group2], ..., [groupN]
# default value is 0
# since v1.14
group_count = 0

## when this storage server supports multiple groups, configure one section per group
# group settings for group #1
# since v1.14
# when supporting multi-group on this storage server, uncomment the following section
#[group1]
#group_name=group1
#storage_server_port=23000
#store_path_count=2
#store_path0=/home/yuqing/fastdfs
#store_path1=/home/yuqing/fastdfs1

# group settings for group #2
# since v1.14
# when supporting multi-group, uncomment the following section as necessary
#[group2]
#group_name=group2
#storage_server_port=23000
#store_path_count=1
#store_path0=/home/yuqing/fastdfs
@ -1,353 +0,0 @@
# is this config file disabled
# false for enabled
# true for disabled
disabled = false

# the name of the group this storage server belongs to
#
# comment or remove this item for fetching from tracker server,
# in this case, use_storage_id must be set to true in tracker.conf,
# and storage_ids.conf must be configured correctly.
group_name = group1

# bind an address of this host
# empty for bind all addresses of this host
bind_addr =

# if bind an address of this host when connecting to other servers
# (this storage server as a client)
# true for binding the address configured by the above parameter: "bind_addr"
# false for binding any address of this host
client_bind = true

# the storage server port
port = 23000

# connect timeout in seconds
# default value is 30
# Note: in the intranet network (LAN), 2 seconds is enough.
connect_timeout = 5

# network timeout in seconds for send and recv
# default value is 30
network_timeout = 60

# the heart beat interval in seconds
# the storage server sends heartbeats to the tracker server periodically
# default value is 30
heart_beat_interval = 30

# disk usage report interval in seconds
# the storage server sends disk usage reports to the tracker server periodically
# default value is 300
stat_report_interval = 60

# the base path to store data and log files
# NOTE: the binlog files may be large, make sure
#       the base path has enough disk space,
#       eg. the disk free space should > 50GB
base_path = /data/fastdfs_data

# max concurrent connections the server supports,
# you should set this parameter larger, eg. 10240
# default value is 256
max_connections = 1024

# the buff size to recv / send data from/to network
# this parameter must be more than 8KB
# 256KB or 512KB is recommended
# default value is 64KB
# since V2.00
buff_size = 256KB

# accept thread count
# default value is 1 which is recommended
# since V4.07
accept_threads = 1

# work thread count
# work threads to deal with network io
# default value is 4
# since V2.00
work_threads = 4

# if disk read / write separated
## false for mixed read and write
## true for separated read and write
# default value is true
# since V2.00
disk_rw_separated = true

# disk reader thread count per store path
# for mixed read / write, this parameter can be 0
# default value is 1
# since V2.00
disk_reader_threads = 1

# disk writer thread count per store path
# for mixed read / write, this parameter can be 0
# default value is 1
# since V2.00
disk_writer_threads = 1

# when no entry to sync, try to read the binlog again after X milliseconds
# must > 0, default value is 200ms
sync_wait_msec = 50

# after syncing a file, usleep milliseconds
# 0 for sync successively (never call usleep)
sync_interval = 0

# storage sync start time of a day, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
sync_start_time = 00:00

# storage sync end time of a day, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
sync_end_time = 23:59

# write to the mark file after syncing N files
# default value is 500
write_mark_file_freq = 500

# disk recovery thread count
# default value is 1
# since V6.04
disk_recovery_threads = 3

# store path (disk or mount point) count, default value is 1
store_path_count = 1

# store_path#, based on 0, to configure the store paths to store files
# if store_path0 does not exist, its value is base_path (NOT recommended)
# the paths must exist.
#
# IMPORTANT NOTE:
#    the store paths' order is very important, don't mess it up!!!
#    the base_path should be independent (different) of the store paths

store_path0 = /data/fastdfs/upload/path0
#store_path1 = /home/yuqing/fastdfs2

# subdir_count * subdir_count directories will be auto created under each
# store_path (disk), value can be 1 to 256, default value is 256
subdir_count_per_path = 256

# tracker_server can occur more than once for multi tracker servers.
# the value format of tracker_server is "HOST:PORT",
# the HOST can be hostname or ip address,
# and the HOST can be dual IPs or hostnames separated by comma,
# the dual IPs must be an inner (intranet) IP and an outer (extranet) IP,
# or two different types of inner (intranet) IPs.
# for example: 192.168.2.100,122.244.141.46:22122
# another eg.: 192.168.1.10,172.17.4.21:22122

tracker_server = 192.168.209.121:22122
tracker_server = 192.168.209.122:22122

#standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level = info

#unix group name to run this program,
#not set (empty) means run by the group of current user
run_by_group =

#unix username to run this program,
#not set (empty) means run by current user
run_by_user =

# allow_hosts can occur more than once, host can be hostname or ip address,
# "*" (only one asterisk) means match all ip addresses
# we can use CIDR ips like 192.168.5.64/26
# and also use ranges like these: 10.0.1.[0-254] and host[01-08,20-25].domain.com
# for example:
# allow_hosts=10.0.1.[1-15,20]
# allow_hosts=host[01-08,20-25].domain.com
# allow_hosts=192.168.5.64/26
allow_hosts = *

# the mode of distributing files to the data path
# 0: round robin (default)
# 1: random, distributed by hash code
file_distribute_path_mode = 0

# valid when file_distribute_to_path is set to 0 (round robin).
# when the written file count reaches this number, then rotate to the next path.
# rotate to the first path (00/00) after the last path (such as FF/FF).
# default value is 100
file_distribute_rotate_count = 100

# call fsync to disk when writing a big file
# 0: never call fsync
# other: call fsync when written bytes >= this value
# default value is 0 (never call fsync)
fsync_after_written_bytes = 0

# sync log buff to disk every interval seconds
# must > 0, default value is 10 seconds
sync_log_buff_interval = 1

# sync binlog buff / cache to disk every interval seconds
# default value is 60 seconds
sync_binlog_buff_interval = 1

# sync storage stat info to disk every interval seconds
# default value is 300 seconds
sync_stat_file_interval = 300

# thread stack size, should >= 512KB
# default value is 512KB
thread_stack_size = 512KB

# the priority as a source server for uploading files.
# the lower this value, the higher its uploading priority.
# default value is 10
upload_priority = 10

# the NIC alias prefix, such as eth in Linux, you can see it by ifconfig -a
# multi aliases split by comma. empty value means auto set by OS type
# default value is empty
if_alias_prefix =

# if check file duplicate, when set to true, use FastDHT to store file indexes
# 1 or yes: need check
# 0 or no: do not check
# default value is 0
check_file_duplicate = 0

# file signature method for checking file duplicates
## hash: four 32-bit hash codes
## md5: MD5 signature
# default value is hash
# since V4.01
file_signature_method = hash

# namespace for storing file indexes (key-value pairs)
# this item must be set when check_file_duplicate is true / on
key_namespace = FastDFS

# set keep_alive to 1 to enable persistent connections with FastDHT servers
# default value is 0 (short connection)
keep_alive = 0

# you can use the "#include filename" (not including double quotes) directive to
# load the FastDHT server list, when the filename is a relative path such as a
# pure filename, the base path is the base path of this config file.
# must set the FastDHT server list when check_file_duplicate is true / on
# please see INSTALL of FastDHT for details
##include /home/yuqing/fastdht/conf/fdht_servers.conf

# if log to access log
# default value is false
# since V4.00
use_access_log = false

# if rotate the access log every day
# default value is false
# since V4.00
rotate_access_log = false

# rotate access log time base, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
# default value is 00:00
# since V4.00
access_log_rotate_time = 00:00

# if compress the old access log by gzip
# default value is false
# since V6.04
compress_old_access_log = false

# compress the access log of this many days before
# default value is 1
# since V6.04
compress_access_log_days_before = 7

# if rotate the error log every day
# default value is false
# since V4.02
rotate_error_log = false

# rotate error log time base, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
# default value is 00:00
# since V4.02
error_log_rotate_time = 00:00

# if compress the old error log by gzip
# default value is false
# since V6.04
compress_old_error_log = false

# compress the error log of this many days before
# default value is 1
# since V6.04
compress_error_log_days_before = 7

# rotate access log when the log file exceeds this size
# 0 means never rotate the log file by log file size
# default value is 0
# since V4.02
rotate_access_log_size = 0

# rotate error log when the log file exceeds this size
# 0 means never rotate the log file by log file size
# default value is 0
# since V4.02
rotate_error_log_size = 0

# keep days of the log files
# 0 means do not delete old log files
# default value is 0
log_file_keep_days = 0

# if skip the invalid record when syncing files
# default value is false
# since V4.02
file_sync_skip_invalid_record = false

# if use connection pool
# default value is false
# since V4.05
use_connection_pool = true

# connections whose idle time exceeds this time will be closed
# unit: second
# default value is 3600
# since V4.05
connection_pool_max_idle_time = 3600

# if compress the binlog files by gzip
# default value is false
# since V6.01
compress_binlog = true

# try to compress binlog at this time, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
# default value is 01:30
# since V6.01
compress_binlog_time = 01:30

# if check the mark of the store path to prevent confusion
# recommended to set this parameter to true
# if two storage servers (instances) MUST use the same store path for
# some specific purposes, you should set this parameter to false
# default value is true
# since V6.03
check_store_path_mark = true

# use the ip address of this storage server if domain_name is empty,
# else this domain name will occur in the url redirected by the tracker server
http.domain_name =

# the port of the web server on this storage server
http.server_port = 8888
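Once a storage server is running with a configuration like the one above, its state as seen by the trackers can be checked with fdfs_monitor (listed among the packaged binaries earlier in this diff). A hedged example; the config path assumes the files were installed to /etc/fdfs as in the debian rules above:

    fdfs_monitor /etc/fdfs/client.conf
    # lists each group, its storage servers and their status (e.g. ACTIVE/OFFLINE),
    # disk usage and sync progress as reported by the tracker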
@ -1,16 +0,0 @@
# <id>  <group_name>  <ip_or_hostname[:port]>
#
# id is a natural number (1, 2, 3 etc.),
# 6 digits of id length are enough, such as 100001
#
# storage ip or hostname can be dual IPs separated by comma,
# one is an inner (intranet) IP and the other is an outer (extranet) IP,
# or two different types of inner (intranet) IPs
# for example: 192.168.2.100,122.244.141.46
# another eg.: 192.168.1.10,172.17.4.21
#
# the port is optional. if you run more than one storaged instance
# on a server, you must specify the port to distinguish different instances.

#100001   group1  192.168.0.196
#100002   group1  192.168.0.197
@ -1,329 +0,0 @@
|
||||||
# is this config file disabled
|
|
||||||
# false for enabled
|
|
||||||
# true for disabled
|
|
||||||
disabled = false
|
|
||||||
|
|
||||||
# bind an address of this host
|
|
||||||
# empty for bind all addresses of this host
|
|
||||||
bind_addr =
|
|
||||||
|
|
||||||
# the tracker server port
|
|
||||||
port = 22122
|
|
||||||
|
|
||||||
# connect timeout in seconds
|
|
||||||
# default value is 30
|
|
||||||
# Note: in the intranet network (LAN), 2 seconds is enough.
|
|
||||||
connect_timeout = 5
|
|
||||||
|
|
||||||
# network timeout in seconds for send and recv
|
|
||||||
# default value is 30
|
|
||||||
network_timeout = 60
|
|
||||||
|
|
||||||
# the base path to store data and log files
|
|
||||||
base_path = /data/fastdfs_data
|
|
||||||
|
|
||||||
# max concurrent connections this server support
|
|
||||||
# you should set this parameter larger, eg. 10240
|
|
||||||
# default value is 256
|
|
||||||
max_connections = 1024
|
|
||||||
|
|
||||||
# accept thread count
|
|
||||||
# default value is 1 which is recommended
|
|
||||||
# since V4.07
|
|
||||||
accept_threads = 1
|
|
||||||
|
|
||||||
# work thread count
|
|
||||||
# work threads to deal network io
|
|
||||||
# default value is 4
|
|
||||||
# since V2.00
|
|
||||||
work_threads = 4
|
|
||||||
|
|
||||||
# the min network buff size
|
|
||||||
# default value 8KB
|
|
||||||
min_buff_size = 8KB
|
|
||||||
|
|
||||||
# the max network buff size
|
|
||||||
# default value 128KB
|
|
||||||
max_buff_size = 128KB
|
|
||||||
|
|
||||||
# the method for selecting group to upload files
|
|
||||||
# 0: round robin
|
|
||||||
# 1: specify group
|
|
||||||
# 2: load balance, select the max free space group to upload file
|
|
||||||
store_lookup = 2
|
|
||||||
|
|
||||||
# which group to upload file
|
|
||||||
# when store_lookup set to 1, must set store_group to the group name
|
|
||||||
store_group = group2
|
|
||||||
|
|
||||||
# which storage server to upload file
|
|
||||||
# 0: round robin (default)
|
|
||||||
# 1: the first server order by ip address
|
|
||||||
# 2: the first server order by priority (the minimal)
|
|
||||||
# Note: if use_trunk_file set to true, must set store_server to 1 or 2
|
|
||||||
store_server = 0
|
|
||||||
|
|
||||||
# which path (means disk or mount point) of the storage server to upload file
|
|
||||||
# 0: round robin
|
|
||||||
# 2: load balance, select the max free space path to upload file
|
|
||||||
store_path = 0
|
|
||||||
|
|
||||||
# which storage server to download file
|
|
||||||
# 0: round robin (default)
|
|
||||||
# 1: the source storage server which the current file uploaded to
|
|
||||||
download_server = 0
|
|
||||||
|
|
||||||
# reserved storage space for system or other applications.
|
|
||||||
# if the free(available) space of any stoarge server in
|
|
||||||
# a group <= reserved_storage_space, no file can be uploaded to this group.
|
|
||||||
# bytes unit can be one of follows:
|
|
||||||
### G or g for gigabyte(GB)
|
|
||||||
### M or m for megabyte(MB)
|
|
||||||
### K or k for kilobyte(KB)
|
|
||||||
### no unit for byte(B)
|
|
||||||
### XX.XX% as ratio such as: reserved_storage_space = 10%
|
|
||||||
reserved_storage_space = 20%
|
|
||||||
|
|
||||||
#standard log level as syslog, case insensitive, value list:
|
|
||||||
### emerg for emergency
|
|
||||||
### alert
|
|
||||||
### crit for critical
|
|
||||||
### error
|
|
||||||
### warn for warning
|
|
||||||
### notice
|
|
||||||
### info
|
|
||||||
### debug
|
|
||||||
log_level = info
|
|
||||||
|
|
||||||
#unix group name to run this program,
|
|
||||||
#not set (empty) means run by the group of current user
|
|
||||||
run_by_group=
|
|
||||||
|
|
||||||
#unix username to run this program,
|
|
||||||
#not set (empty) means run by current user
|
|
||||||
run_by_user =
|
|
||||||
|
|
||||||
# allow_hosts can ocur more than once, host can be hostname or ip address,
|
|
||||||
# "*" (only one asterisk) means match all ip addresses
|
|
||||||
# we can use CIDR ips like 192.168.5.64/26
|
|
||||||
# and also use range like these: 10.0.1.[0-254] and host[01-08,20-25].domain.com
|
|
||||||
# for example:
|
|
||||||
# allow_hosts=10.0.1.[1-15,20]
|
|
||||||
# allow_hosts=host[01-08,20-25].domain.com
|
|
||||||
# allow_hosts=192.168.5.64/26
|
|
||||||
allow_hosts = *
|
|
||||||
|
|
||||||
# sync log buff to disk every interval seconds
|
|
||||||
# default value is 10 seconds
|
|
||||||
sync_log_buff_interval = 1
|
|
||||||
|
|
||||||
# check storage server alive interval seconds
|
|
||||||
check_active_interval = 120
|
|
||||||
|
|
||||||
# thread stack size, should >= 64KB
|
|
||||||
# default value is 256KB
|
|
||||||
thread_stack_size = 256KB
|
|
||||||
|
|
||||||
# auto adjust when the ip address of the storage server changed
|
|
||||||
# default value is true
|
|
||||||
storage_ip_changed_auto_adjust = true
|
|
||||||
|
|
||||||
# storage sync file max delay seconds
|
|
||||||
# default value is 86400 seconds (one day)
|
|
||||||
# since V2.00
|
|
||||||
storage_sync_file_max_delay = 86400
|
|
||||||
|
|
||||||
# the max time of storage sync a file
|
|
||||||
# default value is 300 seconds
|
|
||||||
# since V2.00
|
|
||||||
storage_sync_file_max_time = 300
|
|
||||||
|
|
||||||
# if use a trunk file to store several small files
|
|
||||||
# default value is false
|
|
||||||
# since V3.00
|
|
||||||
use_trunk_file = false
|
|
||||||
|
|
||||||
# the min slot size, should <= 4KB
|
|
||||||
# default value is 256 bytes
|
|
||||||
# since V3.00
|
|
||||||
slot_min_size = 256
|
|
||||||
|
|
||||||
# the max slot size, should > slot_min_size
|
|
||||||
# store the upload file to trunk file when it's size <= this value
|
|
||||||
# default value is 16MB
|
|
||||||
# since V3.00
|
|
||||||
slot_max_size = 1MB
|
|
||||||
|
|
||||||
# the alignment size to allocate the trunk space
|
|
||||||
# default value is 0 (never align)
|
|
||||||
# since V6.05
|
|
||||||
# NOTE: the larger the alignment size, the less likely of disk
|
|
||||||
# fragmentation, but the more space is wasted.
|
|
||||||
trunk_alloc_alignment_size = 256
|
|
||||||
|
|
||||||
# if merge contiguous free spaces of trunk file
|
|
||||||
# default value is false
|
|
||||||
# since V6.05
|
|
||||||
trunk_free_space_merge = true
|
|
||||||
|
|
||||||
# if delete / reclaim the unused trunk files
|
|
||||||
# default value is false
|
|
||||||
# since V6.05
|
|
||||||
delete_unused_trunk_files = false
|
|
||||||
|
|
||||||
# the trunk file size, should >= 4MB
|
|
||||||
# default value is 64MB
|
|
||||||
# since V3.00
|
|
||||||
trunk_file_size = 64MB
|
|
||||||
|
|
||||||
# if create trunk file advancely
|
|
||||||
# default value is false
|
|
||||||
# since V3.06
|
|
||||||
trunk_create_file_advance = false
|
|
||||||
|
|
||||||
# the time base to create trunk file
|
|
||||||
# the time format: HH:MM
|
|
||||||
# default value is 02:00
|
|
||||||
# since V3.06
|
|
||||||
trunk_create_file_time_base = 02:00
|
|
||||||
|
|
||||||
# the interval of create trunk file, unit: second
|
|
||||||
# default value is 38400 (one day)
|
|
||||||
# since V3.06
|
|
||||||
trunk_create_file_interval = 86400
|
|
||||||
|
|
||||||
# the threshold to create trunk file
|
|
||||||
# when the free trunk file size less than the threshold,
|
|
||||||
# will create he trunk files
|
|
||||||
# default value is 0
|
|
||||||
# since V3.06
|
|
||||||
trunk_create_file_space_threshold = 20G
|
|
||||||
|
|
||||||
# if check trunk space occupying when loading trunk free spaces
|
|
||||||
# the occupied spaces will be ignored
|
|
||||||
# default value is false
|
|
||||||
# since V3.09
|
|
||||||
# NOTICE: set this parameter to true will slow the loading of trunk spaces
|
|
||||||
# when startup. you should set this parameter to true when neccessary.
|
|
||||||
trunk_init_check_occupying = false
|
|
||||||
|
|
||||||
# if ignore storage_trunk.dat, reload from trunk binlog
|
|
||||||
# default value is false
|
|
||||||
# since V3.10
|
|
||||||
# set to true once when upgrading from a version earlier than V3.10
|
|
||||||
trunk_init_reload_from_binlog = false
|
|
||||||
|
|
||||||
# the min interval for compressing the trunk binlog file
|
|
||||||
# unit: second, 0 means never compress
|
|
||||||
# FastDFS compress the trunk binlog when trunk init and trunk destroy
|
|
||||||
# it is recommended to set this parameter to 86400 (one day)
|
|
||||||
# default value is 0
|
|
||||||
# since V5.01
|
|
||||||
trunk_compress_binlog_min_interval = 86400
|
|
||||||
|
|
||||||
# the interval for compressing the trunk binlog file
|
|
||||||
# unit: second, 0 means never compress
|
|
||||||
# it is recommended to set this parameter to 86400 (one day)
|
|
||||||
# default value is 0
|
|
||||||
# since V6.05
|
|
||||||
trunk_compress_binlog_interval = 86400
|
|
||||||
|
|
||||||
# compress the trunk binlog time base, time format: Hour:Minute
|
|
||||||
# Hour from 0 to 23, Minute from 0 to 59
|
|
||||||
# default value is 03:00
|
|
||||||
# since V6.05
|
|
||||||
trunk_compress_binlog_time_base = 03:00
|
|
||||||
|
|
||||||
# max backups for the trunk binlog file
|
|
||||||
# default value is 0 (never backup)
|
|
||||||
# since V6.05
|
|
||||||
trunk_binlog_max_backups = 7
|
|
||||||
|
|
||||||
# if use storage server ID instead of IP address
|
|
||||||
# if you want to use dual IPs for storage server, you MUST set
|
|
||||||
# this parameter to true, and configure the dual IPs in the file
|
|
||||||
# configured by following item "storage_ids_filename", such as storage_ids.conf
|
|
||||||
# default value is false
|
|
||||||
# since V4.00
|
|
||||||
use_storage_id = false
|
|
||||||
|
|
||||||
# specify storage ids filename, can use relative or absolute path
|
|
||||||
# this parameter is valid only when use_storage_id set to true
|
|
||||||
# since V4.00
|
|
||||||
storage_ids_filename = storage_ids.conf
|
|
||||||
|
|
||||||
# id type of the storage server in the filename, values are:
|
|
||||||
## ip: the ip address of the storage server
|
|
||||||
## id: the server id of the storage server
|
|
||||||
# this parameter is valid only when use_storage_id is set to true
|
|
||||||
# default value is ip
|
|
||||||
# since V4.03
|
|
||||||
id_type_in_filename = id
|
|
||||||
|
|
||||||
# if store slave file use symbol link
|
|
||||||
# default value is false
|
|
||||||
# since V4.01
|
|
||||||
store_slave_file_use_link = false
|
|
||||||
|
|
||||||
# if rotate the error log every day
|
|
||||||
# default value is false
|
|
||||||
# since V4.02
|
|
||||||
rotate_error_log = false
|
|
||||||
|
|
||||||
# rotate error log time base, time format: Hour:Minute
|
|
||||||
# Hour from 0 to 23, Minute from 0 to 59
|
|
||||||
# default value is 00:00
|
|
||||||
# since V4.02
|
|
||||||
error_log_rotate_time = 00:00
|
|
||||||
|
|
||||||
# if compress the old error log by gzip
|
|
||||||
# default value is false
|
|
||||||
# since V6.04
|
|
||||||
compress_old_error_log = false
|
|
||||||
|
|
||||||
# compress the error log days before
|
|
||||||
# default value is 1
|
|
||||||
# since V6.04
|
|
||||||
compress_error_log_days_before = 7
|
|
||||||
|
|
||||||
# rotate error log when the log file exceeds this size
|
|
||||||
# 0 means never rotates log file by log file size
|
|
||||||
# default value is 0
|
|
||||||
# since V4.02
|
|
||||||
rotate_error_log_size = 0
|
|
||||||
|
|
||||||
# keep days of the log files
|
|
||||||
# 0 means do not delete old log files
|
|
||||||
# default value is 0
|
|
||||||
log_file_keep_days = 0
|
|
||||||
|
|
||||||
# if use connection pool
|
|
||||||
# default value is false
|
|
||||||
# since V4.05
|
|
||||||
use_connection_pool = true
|
|
||||||
|
|
||||||
# connections whose idle time exceeds this value will be closed
|
|
||||||
# unit: second
|
|
||||||
# default value is 3600
|
|
||||||
# since V4.05
|
|
||||||
connection_pool_max_idle_time = 3600
|
|
||||||
|
|
||||||
# HTTP port on this tracker server
|
|
||||||
http.server_port = 8080
|
|
||||||
|
|
||||||
# check storage HTTP server alive interval seconds
|
|
||||||
# <= 0 for never check
|
|
||||||
# default value is 30
|
|
||||||
http.check_alive_interval = 30
|
|
||||||
|
|
||||||
# check storage HTTP server alive type, values are:
|
|
||||||
# tcp : connect to the storage server's HTTP port only,
|
|
||||||
# do not request and get response
|
|
||||||
# http: storage check alive url must return http status 200
|
|
||||||
# default value is tcp
|
|
||||||
http.check_alive_type = tcp
|
|
||||||
|
|
||||||
# check storage HTTP server alive uri/url
|
|
||||||
# NOTE: the storage embedded HTTP server supports the uri: /status.html
|
|
||||||
http.check_alive_uri = /status.html
|
|
||||||
|
|
||||||
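To see by hand what http.check_alive_type = http checks, request the same status uri from a storage server's web port (the address is a placeholder, and 8888 is the http.server_port used in storage.conf below):

STORAGE_IP=192.168.0.196                         # replace with one of your storage servers
curl -i http://${STORAGE_IP}:8888/status.html    # the tracker expects HTTP status 200 here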
|
|
@ -1,36 +0,0 @@
|
||||||
#http server
|
|
||||||
#
|
|
||||||
|
|
||||||
server {
|
|
||||||
listen 9088;
|
|
||||||
server_name localhost;
|
|
||||||
|
|
||||||
#open() “/usr/local/nginx/html/favicon.ico” failed (2: No such file or directory) -- just silence it with the location below
|
|
||||||
location = /favicon.ico {
|
|
||||||
log_not_found off;
|
|
||||||
access_log off;
|
|
||||||
}
|
|
||||||
|
|
||||||
#reverse proxy HTTP file access requests to the fastdfs module, without logging the requests
|
|
||||||
location ~/group[0-9]/ {
|
|
||||||
ngx_fastdfs_module;
|
|
||||||
|
|
||||||
log_not_found off;
|
|
||||||
access_log off;
|
|
||||||
}
|
|
||||||
|
|
||||||
# location ~ /group1/M00 {
|
|
||||||
# alias /data/fastdfs/upload/path0;
|
|
||||||
# ngx_fastdfs_module;
|
|
||||||
# }
|
|
||||||
|
|
||||||
# location ~ /group1/M01 {
|
|
||||||
# alias /data/fastdfs/upload/path1;
|
|
||||||
# ngx_fastdfs_module;
|
|
||||||
# }
|
|
||||||
|
|
||||||
error_page 500 502 503 504 /50x.html;
|
|
||||||
location = /50x.html {
|
|
||||||
root html;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
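A quick smoke test for the server block above; the file id is a made-up example, so substitute one returned by fdfs_upload_file:

curl -I http://127.0.0.1:9088/group1/M00/00/00/example.jpg   # served by ngx_fastdfs_module
curl -I http://127.0.0.1:9088/favicon.ico                    # a 404 here is fine and is not logged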
|
|
@ -1,33 +0,0 @@
|
||||||
worker_processes 1;
|
|
||||||
worker_rlimit_nofile 65535; #be sure to raise the server's max open files limit first.
|
|
||||||
|
|
||||||
error_log /data/fastdfs_data/logs/nginx-error.log;
|
|
||||||
|
|
||||||
events {
|
|
||||||
use epoll; #on Linux 2.6+ servers you should use epoll.
|
|
||||||
worker_connections 65535;
|
|
||||||
}
|
|
||||||
|
|
||||||
http {
|
|
||||||
include mime.types;
|
|
||||||
default_type application/octet-stream;
|
|
||||||
|
|
||||||
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
|
|
||||||
'$status $body_bytes_sent "$http_referer" '
|
|
||||||
'"$http_user_agent" "$http_x_forwarded_for"';
|
|
||||||
|
|
||||||
access_log /data/fastdfs_data/logs/nginx-access.log main;
|
|
||||||
sendfile on;
|
|
||||||
keepalive_timeout 65;
|
|
||||||
|
|
||||||
gzip on;
|
|
||||||
gzip_min_length 2k;
|
|
||||||
gzip_buffers 8 32k;
|
|
||||||
gzip_http_version 1.1;
|
|
||||||
gzip_comp_level 2;
|
|
||||||
gzip_types text/plain text/css text/javascript application/json application/javascript application/x-javascript application/xml;
|
|
||||||
gzip_vary on;
|
|
||||||
|
|
||||||
include /usr/local/nginx/conf.d/*.conf;
|
|
||||||
|
|
||||||
}
|
|
||||||
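Per the worker_rlimit_nofile note above, the operating system limit has to be raised before nginx can actually use 65535 descriptors; one common way on the host (values illustrative):

ulimit -n 65535                                                # for the shell / service user starting nginx
/usr/local/nginx/sbin/nginx -t && /usr/local/nginx/sbin/nginx  # check the config, then start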
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
|
@ -1,55 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
|
|
||||||
# storage paths used by the fastdfs config files; they must be created in advance
|
|
||||||
FASTDFS_BASE_PATH=/data/fastdfs_data \
|
|
||||||
FASTDFS_STORE_PATH=/data/fastdfs/upload \
|
|
||||||
|
|
||||||
# startup argument
|
|
||||||
# - tracker : start the tracker_server service
|
|
||||||
# - storage : start the storage service
|
|
||||||
start_parameter=$1
|
|
||||||
|
|
||||||
if [ ! -d "$FASTDFS_BASE_PATH" ]; then
|
|
||||||
mkdir -p ${FASTDFS_BASE_PATH};
|
|
||||||
fi
|
|
||||||
|
|
||||||
function start_tracker(){
|
|
||||||
|
|
||||||
/usr/bin/fdfs_trackerd /etc/fdfs/tracker.conf
|
|
||||||
tail -f /data/fastdfs_data/logs/trackerd.log
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
function start_storage(){
|
|
||||||
if [ ! -d "$FASTDFS_STORE_PATH" ]; then
|
|
||||||
mkdir -p ${FASTDFS_STORE_PATH}/{path0,path1,path2,path3};
|
|
||||||
fi
|
|
||||||
/usr/bin/fdfs_storaged /etc/fdfs/storage.conf;
|
|
||||||
sleep 5
|
|
||||||
|
|
||||||
# the nginx log directory is /data/fastdfs_data/logs/; create it manually in case storage starts slowly and has not created the logs directory yet
|
|
||||||
if [ ! -d "$FASTDFS_BASE_PATH/logs" ]; then
|
|
||||||
mkdir -p ${FASTDFS_BASE_PATH}/logs;
|
|
||||||
fi
|
|
||||||
|
|
||||||
/usr/local/nginx/sbin/nginx;
|
|
||||||
tail -f /data/fastdfs_data/logs/storaged.log;
|
|
||||||
}
|
|
||||||
|
|
||||||
function run (){
|
|
||||||
|
|
||||||
case ${start_parameter} in
|
|
||||||
tracker)
|
|
||||||
echo "启动tracker"
|
|
||||||
start_tracker
|
|
||||||
;;
|
|
||||||
storage)
|
|
||||||
echo "启动storage"
|
|
||||||
start_storage
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo "请指定要启动哪个服务,tracker还是storage(二选一),传参为tracker | storage"
|
|
||||||
esac
|
|
||||||
}
|
|
||||||
|
|
||||||
run
|
|
||||||
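Typical invocations of the script above, matching the tracker | storage argument it expects (one service per container):

bash /start.sh tracker    # start fdfs_trackerd and tail trackerd.log
bash /start.sh storage    # start fdfs_storaged plus nginx and tail storaged.log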
|
|
@ -1,83 +0,0 @@
|
||||||
# choose a base system image; the very small alpine Linux image can be used
|
|
||||||
#FROM centos:7
|
|
||||||
FROM alpine:3.16
|
|
||||||
|
|
||||||
LABEL MAINTAINER liyanjing 284223249@qq.com
|
|
||||||
|
|
||||||
# NOTE
|
|
||||||
# v6.0.9 depends on libfastcommon and libserverframe; v6.0.8 and below depend on libevent and libfastcommon, where libfastcommon is provided by the FastDFS project
|
|
||||||
# v6.0.9 matches fastdfs-nginx-module-1.23; v6.0.8 and below use fastdfs-nginx-module-1.22
|
|
||||||
|
|
||||||
# 0. locations of the install packages, fdfs base directory and store directory
|
|
||||||
ENV INSTALL_PATH=/usr/local/src \
|
|
||||||
LIBFASTCOMMON_VERSION="1.0.60" \
|
|
||||||
LIBSERVERFRAME_VERSION="1.1.19" \
|
|
||||||
FASTDFS_VERSION="V6.09" \
|
|
||||||
FASTDFS_NGINX_MODULE_VERSION="1.23" \
|
|
||||||
NGINX_VERSION="1.22.0" \
|
|
||||||
TENGINE_VERSION="2.3.3"
|
|
||||||
|
|
||||||
# 0.change the system source for installing libs
|
|
||||||
RUN echo "http://mirrors.aliyun.com/alpine/v3.16/main" > /etc/apk/repositories \
|
|
||||||
&& echo "http://mirrors.aliyun.com/alpine/v3.16/community" >> /etc/apk/repositories
|
|
||||||
|
|
||||||
# 1. copy the install packages
|
|
||||||
ADD soft ${INSTALL_PATH}
|
|
||||||
|
|
||||||
# 2. environment setup
|
|
||||||
# - create the fdfs store directories
|
|
||||||
# - install dependencies
|
|
||||||
# - install libfastcommon
|
|
||||||
# - install fastdfs
|
|
||||||
# - install nginx and set up the combined nginx + fastdfs environment
|
|
||||||
#Run yum -y install -y gcc gcc-c++ libevent libevent-devel make automake autoconf libtool perl pcre pcre-devel zlib zlib-devel openssl openssl-devel zip unzip net-tools wget vim lsof \
|
|
||||||
RUN apk update && apk add --no-cache --virtual .build-deps bash autoconf gcc libc-dev make pcre-dev zlib-dev linux-headers gnupg libxslt-dev gd-dev geoip-dev wget \
|
|
||||||
&& cd ${INSTALL_PATH} \
|
|
||||||
&& tar -zxf libfastcommon-${LIBFASTCOMMON_VERSION}.tar.gz \
|
|
||||||
&& tar -zxf libserverframe-${LIBSERVERFRAME_VERSION}.tar.gz \
|
|
||||||
&& tar -zxf fastdfs-${FASTDFS_VERSION}.tar.gz \
|
|
||||||
&& tar -zxf fastdfs-nginx-module-${FASTDFS_NGINX_MODULE_VERSION}.tar.gz \
|
|
||||||
&& tar -zxf nginx-${NGINX_VERSION}.tar.gz \
|
|
||||||
\
|
|
||||||
&& cd ${INSTALL_PATH}/libfastcommon-${LIBFASTCOMMON_VERSION}/ \
|
|
||||||
&& ./make.sh \
|
|
||||||
&& ./make.sh install \
|
|
||||||
&& cd ${INSTALL_PATH}/libserverframe-${LIBSERVERFRAME_VERSION}/ \
|
|
||||||
&& ./make.sh \
|
|
||||||
&& ./make.sh install \
|
|
||||||
&& cd ${INSTALL_PATH}/fastdfs-${FASTDFS_VERSION}/ \
|
|
||||||
&& ./make.sh \
|
|
||||||
&& ./make.sh install \
|
|
||||||
\
|
|
||||||
&& cd ${INSTALL_PATH}/nginx-${NGINX_VERSION}/ \
|
|
||||||
&& ./configure --prefix=/usr/local/nginx --pid-path=/var/run/nginx/nginx.pid --with-http_stub_status_module --with-http_gzip_static_module --with-http_realip_module --with-http_sub_module --with-stream=dynamic \
|
|
||||||
--add-module=${INSTALL_PATH}/fastdfs-nginx-module-${FASTDFS_NGINX_MODULE_VERSION}/src/ \
|
|
||||||
&& make \
|
|
||||||
&& make install \
|
|
||||||
\
|
|
||||||
&& rm -rf ${INSTALL_PATH}/* \
|
|
||||||
&& apk del .build-deps gcc libc-dev make linux-headers gnupg libxslt-dev gd-dev geoip-dev wget
|
|
||||||
|
|
||||||
# 3. add config files; a destination path ending with / is treated by docker as a directory and created automatically if it does not exist
|
|
||||||
COPY conf/*.* /etc/fdfs/
|
|
||||||
COPY nginx_conf/nginx.conf /usr/local/nginx/conf/
|
|
||||||
COPY nginx_conf.d/*.conf /usr/local/nginx/conf.d/
|
|
||||||
COPY start.sh /
|
|
||||||
|
|
||||||
|
|
||||||
ENV TZ=Asia/Shanghai
|
|
||||||
|
|
||||||
# 4. make the start script executable and set the timezone to China time
|
|
||||||
RUN chmod u+x /start.sh \
|
|
||||||
&& apk add --no-cache bash pcre-dev zlib-dev \
|
|
||||||
\
|
|
||||||
&& apk add -U tzdata \
|
|
||||||
&& ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone \
|
|
||||||
&& apk del tzdata && rm -rf /var/cache/apk/*
|
|
||||||
|
|
||||||
EXPOSE 22122 23000 9088
|
|
||||||
|
|
||||||
WORKDIR /
|
|
||||||
|
|
||||||
# container entrypoint
|
|
||||||
ENTRYPOINT ["/bin/bash","/start.sh"]
|
|
||||||
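A possible build-and-run sequence for this Dockerfile; the image tag, host networking and volume mappings are assumptions, and the build context must contain the soft/, conf/, nginx_conf/, nginx_conf.d/ directories and start.sh referenced by the ADD/COPY steps:

docker build -t fastdfs:6.09 .
docker run -d --net=host -v /data/fastdfs_data:/data/fastdfs_data fastdfs:6.09 tracker
docker run -d --net=host \
    -v /data/fastdfs_data:/data/fastdfs_data \
    -v /data/fastdfs/upload:/data/fastdfs/upload \
    fastdfs:6.09 storage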
Binary file not shown.
|
|
|
@ -1,71 +0,0 @@
|
||||||
# connect timeout in seconds
|
|
||||||
# default value is 30s
|
|
||||||
# Note: in the intranet network (LAN), 2 seconds is enough.
|
|
||||||
connect_timeout = 5
|
|
||||||
|
|
||||||
# network timeout in seconds
|
|
||||||
# default value is 30s
|
|
||||||
network_timeout = 60
|
|
||||||
|
|
||||||
# the base path to store log files
|
|
||||||
base_path = /data/fastdfs_data
|
|
||||||
|
|
||||||
# tracker_server can occur more than once for multiple tracker servers.
|
|
||||||
# the value format of tracker_server is "HOST:PORT",
|
|
||||||
# the HOST can be hostname or ip address,
|
|
||||||
# and the HOST can be dual IPs or hostnames separated by comma,
|
|
||||||
# the dual IPS must be an inner (intranet) IP and an outer (extranet) IP,
|
|
||||||
# or two different types of inner (intranet) IPs.
|
|
||||||
# for example: 192.168.2.100,122.244.141.46:22122
|
|
||||||
# another eg.: 192.168.1.10,172.17.4.21:22122
|
|
||||||
|
|
||||||
tracker_server = 192.168.0.196:22122
|
|
||||||
tracker_server = 192.168.0.197:22122
|
|
||||||
|
|
||||||
#standard log level as syslog, case insensitive, value list:
|
|
||||||
### emerg for emergency
|
|
||||||
### alert
|
|
||||||
### crit for critical
|
|
||||||
### error
|
|
||||||
### warn for warning
|
|
||||||
### notice
|
|
||||||
### info
|
|
||||||
### debug
|
|
||||||
log_level = info
|
|
||||||
|
|
||||||
# if use connection pool
|
|
||||||
# default value is false
|
|
||||||
# since V4.05
|
|
||||||
use_connection_pool = false
|
|
||||||
|
|
||||||
# connections whose idle time exceeds this value will be closed
|
|
||||||
# unit: second
|
|
||||||
# default value is 3600
|
|
||||||
# since V4.05
|
|
||||||
connection_pool_max_idle_time = 3600
|
|
||||||
|
|
||||||
# if load FastDFS parameters from tracker server
|
|
||||||
# since V4.05
|
|
||||||
# default value is false
|
|
||||||
load_fdfs_parameters_from_tracker = false
|
|
||||||
|
|
||||||
# if use storage ID instead of IP address
|
|
||||||
# same as tracker.conf
|
|
||||||
# valid only when load_fdfs_parameters_from_tracker is false
|
|
||||||
# default value is false
|
|
||||||
# since V4.05
|
|
||||||
use_storage_id = false
|
|
||||||
|
|
||||||
# specify storage ids filename, can use relative or absolute path
|
|
||||||
# same as tracker.conf
|
|
||||||
# valid only when load_fdfs_parameters_from_tracker is false
|
|
||||||
# since V4.05
|
|
||||||
storage_ids_filename = storage_ids.conf
|
|
||||||
|
|
||||||
|
|
||||||
#HTTP settings
|
|
||||||
http.tracker_server_port = 80
|
|
||||||
|
|
||||||
#use "#include" directive to include HTTP other settiongs
|
|
||||||
##include http.conf
|
|
||||||
|
|
||||||
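The client config above is the file the FastDFS command line tools take as their first argument, for example (the local filename is only an illustration):

fdfs_upload_file /etc/fdfs/client.conf ./test.jpg    # prints a file id such as group1/M00/00/00/...
fdfs_monitor /etc/fdfs/client.conf                   # query tracker and storage status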
|
|
@ -1,29 +0,0 @@
|
||||||
# HTTP default content type
|
|
||||||
http.default_content_type = application/octet-stream
|
|
||||||
|
|
||||||
# MIME types mapping filename
|
|
||||||
# MIME types file format: MIME_type extensions
|
|
||||||
# such as: image/jpeg jpeg jpg jpe
|
|
||||||
# you can use apache's MIME file: mime.types
|
|
||||||
http.mime_types_filename = mime.types
|
|
||||||
|
|
||||||
# if use token to anti-steal
|
|
||||||
# default value is false (0)
|
|
||||||
http.anti_steal.check_token = false
|
|
||||||
|
|
||||||
# token TTL (time to live), seconds
|
|
||||||
# default value is 600
|
|
||||||
http.anti_steal.token_ttl = 900
|
|
||||||
|
|
||||||
# secret key to generate anti-steal token
|
|
||||||
# this parameter must be set when http.anti_steal.check_token set to true
|
|
||||||
# the length of the secret key should not exceed 128 bytes
|
|
||||||
http.anti_steal.secret_key = FastDFS1234567890
|
|
||||||
|
|
||||||
# return the content of this file when the token check fails
|
|
||||||
# default value is empty (no file specified)
|
|
||||||
http.anti_steal.token_check_fail = /home/yuqing/fastdfs/conf/anti-steal.jpg
|
|
||||||
|
|
||||||
# if support multi regions for HTTP Range
|
|
||||||
# default value is true
|
|
||||||
http.multi_range.enabed = true
|
|
||||||
File diff suppressed because it is too large
|
|
@ -1,137 +0,0 @@
|
||||||
# connect timeout in seconds
|
|
||||||
# default value is 30s
|
|
||||||
connect_timeout=15
|
|
||||||
|
|
||||||
# network recv and send timeout in seconds
|
|
||||||
# default value is 30s
|
|
||||||
network_timeout=30
|
|
||||||
|
|
||||||
# the base path to store log files
|
|
||||||
base_path=/data/fastdfs_data
|
|
||||||
|
|
||||||
# if load FastDFS parameters from tracker server
|
|
||||||
# since V1.12
|
|
||||||
# default value is false
|
|
||||||
load_fdfs_parameters_from_tracker=true
|
|
||||||
|
|
||||||
# storage sync file max delay seconds
|
|
||||||
# same as tracker.conf
|
|
||||||
# valid only when load_fdfs_parameters_from_tracker is false
|
|
||||||
# since V1.12
|
|
||||||
# default value is 86400 seconds (one day)
|
|
||||||
storage_sync_file_max_delay = 86400
|
|
||||||
|
|
||||||
# if use storage ID instead of IP address
|
|
||||||
# same as tracker.conf
|
|
||||||
# valid only when load_fdfs_parameters_from_tracker is false
|
|
||||||
# default value is false
|
|
||||||
# since V1.13
|
|
||||||
use_storage_id = false
|
|
||||||
|
|
||||||
# specify storage ids filename, can use relative or absolute path
|
|
||||||
# same as tracker.conf
|
|
||||||
# valid only when load_fdfs_parameters_from_tracker is false
|
|
||||||
# since V1.13
|
|
||||||
storage_ids_filename = storage_ids.conf
|
|
||||||
|
|
||||||
# FastDFS tracker_server can occur more than once, and tracker_server format is
|
|
||||||
# "host:port", host can be hostname or ip address
|
|
||||||
# valid only when load_fdfs_parameters_from_tracker is true
|
|
||||||
tracker_server = 192.168.209.121:22122
|
|
||||||
tracker_server = 192.168.209.122:22122
|
|
||||||
|
|
||||||
# the port of the local storage server
|
|
||||||
# the default value is 23000
|
|
||||||
storage_server_port=23000
|
|
||||||
|
|
||||||
# the group name of the local storage server
|
|
||||||
group_name=group1
|
|
||||||
|
|
||||||
# whether the url / uri includes the group name
|
|
||||||
# set to false when uri like /M00/00/00/xxx
|
|
||||||
# set to true when uri like ${group_name}/M00/00/00/xxx, such as group1/M00/xxx
|
|
||||||
# default value is false
|
|
||||||
url_have_group_name = true
|
|
||||||
|
|
||||||
# path(disk or mount point) count, default value is 1
|
|
||||||
# must be the same as storage.conf
|
|
||||||
store_path_count=1
|
|
||||||
|
|
||||||
# store_path#, based on 0; if store_path0 does not exist, its value is base_path
|
|
||||||
# the paths must exist
|
|
||||||
# must be the same as storage.conf
|
|
||||||
store_path0=/data/fastdfs/upload/path0
|
|
||||||
#store_path1=/home/yuqing/fastdfs1
|
|
||||||
|
|
||||||
# standard log level as syslog, case insensitive, value list:
|
|
||||||
### emerg for emergency
|
|
||||||
### alert
|
|
||||||
### crit for critical
|
|
||||||
### error
|
|
||||||
### warn for warning
|
|
||||||
### notice
|
|
||||||
### info
|
|
||||||
### debug
|
|
||||||
log_level=info
|
|
||||||
|
|
||||||
# set the log filename, such as /usr/local/apache2/logs/mod_fastdfs.log
|
|
||||||
# empty for output to stderr (apache and nginx error_log file)
|
|
||||||
log_filename=
|
|
||||||
|
|
||||||
# response mode when the file does not exist in the local file system
|
|
||||||
## proxy: get the content from other storage server, then send to client
|
|
||||||
## redirect: redirect to the original storage server (HTTP Header is Location)
|
|
||||||
response_mode=proxy
|
|
||||||
|
|
||||||
# the NIC alias prefix, such as eth in Linux, you can see it by ifconfig -a
|
|
||||||
# multi aliases split by comma. empty value means auto set by OS type
|
|
||||||
# this parameter is used to get all ip addresses of the local host
|
|
||||||
# default value is empty
|
|
||||||
if_alias_prefix=
|
|
||||||
|
|
||||||
# use "#include" directive to include HTTP config file
|
|
||||||
# NOTE: #include is an include directive, do NOT remove the # before include
|
|
||||||
#include http.conf
|
|
||||||
|
|
||||||
|
|
||||||
# if support flv
|
|
||||||
# default value is false
|
|
||||||
# since v1.15
|
|
||||||
flv_support = true
|
|
||||||
|
|
||||||
# flv file extension name
|
|
||||||
# default value is flv
|
|
||||||
# since v1.15
|
|
||||||
flv_extension = flv
|
|
||||||
|
|
||||||
|
|
||||||
## when this storage server serves multiple groups, set this to the number of groups; use 0 for a single group.
|
|
||||||
## there is no need to run storages for multiple groups on one server, because a storage itself supports multiple store paths
|
|
||||||
# set the group count
|
|
||||||
# set to a non-zero value to support multi-group on this storage server
|
|
||||||
# set to 0 for single group only
|
|
||||||
# groups settings section as [group1], [group2], ..., [groupN]
|
|
||||||
# default value is 0
|
|
||||||
# since v1.14
|
|
||||||
group_count = 0
|
|
||||||
|
|
||||||
## when this storage server serves multiple groups, set this to the number of groups
|
|
||||||
# group settings for group #1
|
|
||||||
# since v1.14
|
|
||||||
# when support multi-group on this storage server, uncomment following section
|
|
||||||
#[group1]
|
|
||||||
#group_name=group1
|
|
||||||
#storage_server_port=23000
|
|
||||||
#store_path_count=2
|
|
||||||
#store_path0=/home/yuqing/fastdfs
|
|
||||||
#store_path1=/home/yuqing/fastdfs1
|
|
||||||
|
|
||||||
# group settings for group #2
|
|
||||||
# since v1.14
|
|
||||||
# when supporting multi-group, uncomment the following section as necessary
|
|
||||||
#[group2]
|
|
||||||
#group_name=group2
|
|
||||||
#storage_server_port=23000
|
|
||||||
#store_path_count=1
|
|
||||||
#store_path0=/home/yuqing/fastdfs
|
|
||||||
|
|
||||||
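With url_have_group_name = true and store_path0 as configured above, a request such as /group1/M00/00/1A/example.jpg (a made-up file id) should be resolved by ngx_fastdfs_module to /data/fastdfs/upload/path0/data/00/1A/example.jpg: M00 selects store_path0 and the next two path parts select the data sub-directories. A quick check from the storage host:

curl -I http://127.0.0.1:9088/group1/M00/00/1A/example.jpg   # served (or proxied) by ngx_fastdfs_module
ls -l /data/fastdfs/upload/path0/data/00/1A/                 # where the file sits on disk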
|
|
@ -1,353 +0,0 @@
|
||||||
# is this config file disabled
|
|
||||||
# false for enabled
|
|
||||||
# true for disabled
|
|
||||||
disabled = false
|
|
||||||
|
|
||||||
# the name of the group this storage server belongs to
|
|
||||||
#
|
|
||||||
# comment or remove this item for fetching from tracker server,
|
|
||||||
# in this case, use_storage_id must set to true in tracker.conf,
|
|
||||||
# and storage_ids.conf must be configured correctly.
|
|
||||||
group_name = group1
|
|
||||||
|
|
||||||
# bind an address of this host
|
|
||||||
# empty for bind all addresses of this host
|
|
||||||
bind_addr =
|
|
||||||
|
|
||||||
# if bind an address of this host when connect to other servers
|
|
||||||
# (this storage server as a client)
|
|
||||||
# true for binding the address configured by the above parameter: "bind_addr"
|
|
||||||
# false for binding any address of this host
|
|
||||||
client_bind = true
|
|
||||||
|
|
||||||
# the storage server port
|
|
||||||
port = 23000
|
|
||||||
|
|
||||||
# connect timeout in seconds
|
|
||||||
# default value is 30
|
|
||||||
# Note: in the intranet network (LAN), 2 seconds is enough.
|
|
||||||
connect_timeout = 5
|
|
||||||
|
|
||||||
# network timeout in seconds for send and recv
|
|
||||||
# default value is 30
|
|
||||||
network_timeout = 60
|
|
||||||
|
|
||||||
# the heart beat interval in seconds
|
|
||||||
# the storage server send heartbeat to tracker server periodically
|
|
||||||
# default value is 30
|
|
||||||
heart_beat_interval = 30
|
|
||||||
|
|
||||||
# disk usage report interval in seconds
|
|
||||||
# the storage server send disk usage report to tracker server periodically
|
|
||||||
# default value is 300
|
|
||||||
stat_report_interval = 60
|
|
||||||
|
|
||||||
# the base path to store data and log files
|
|
||||||
# NOTE: the binlog files maybe are large, make sure
|
|
||||||
# the base path has enough disk space,
|
|
||||||
# eg. the disk free space should > 50GB
|
|
||||||
base_path = /data/fastdfs_data
|
|
||||||
|
|
||||||
# max concurrent connections the server supports,
|
|
||||||
# you should set this parameter larger, eg. 10240
|
|
||||||
# default value is 256
|
|
||||||
max_connections = 1024
|
|
||||||
|
|
||||||
# the buff size to recv / send data from/to network
|
|
||||||
# this parameter must be more than 8KB
|
|
||||||
# 256KB or 512KB is recommended
|
|
||||||
# default value is 64KB
|
|
||||||
# since V2.00
|
|
||||||
buff_size = 256KB
|
|
||||||
|
|
||||||
# accept thread count
|
|
||||||
# default value is 1 which is recommended
|
|
||||||
# since V4.07
|
|
||||||
accept_threads = 1
|
|
||||||
|
|
||||||
# work thread count
|
|
||||||
# work threads to deal network io
|
|
||||||
# default value is 4
|
|
||||||
# since V2.00
|
|
||||||
work_threads = 4
|
|
||||||
|
|
||||||
# if disk read / write separated
|
|
||||||
## false for mixed read and write
|
|
||||||
## true for separated read and write
|
|
||||||
# default value is true
|
|
||||||
# since V2.00
|
|
||||||
disk_rw_separated = true
|
|
||||||
|
|
||||||
# disk reader thread count per store path
|
|
||||||
# for mixed read / write, this parameter can be 0
|
|
||||||
# default value is 1
|
|
||||||
# since V2.00
|
|
||||||
disk_reader_threads = 1
|
|
||||||
|
|
||||||
# disk writer thread count per store path
|
|
||||||
# for mixed read / write, this parameter can be 0
|
|
||||||
# default value is 1
|
|
||||||
# since V2.00
|
|
||||||
disk_writer_threads = 1
|
|
||||||
|
|
||||||
# when no entry to sync, try read binlog again after X milliseconds
|
|
||||||
# must > 0, default value is 200ms
|
|
||||||
sync_wait_msec = 50
|
|
||||||
|
|
||||||
# after sync a file, usleep milliseconds
|
|
||||||
# 0 for sync successively (never call usleep)
|
|
||||||
sync_interval = 0
|
|
||||||
|
|
||||||
# storage sync start time of a day, time format: Hour:Minute
|
|
||||||
# Hour from 0 to 23, Minute from 0 to 59
|
|
||||||
sync_start_time = 00:00
|
|
||||||
|
|
||||||
# storage sync end time of a day, time format: Hour:Minute
|
|
||||||
# Hour from 0 to 23, Minute from 0 to 59
|
|
||||||
sync_end_time = 23:59
|
|
||||||
|
|
||||||
# write to the mark file after sync N files
|
|
||||||
# default value is 500
|
|
||||||
write_mark_file_freq = 500
|
|
||||||
|
|
||||||
# disk recovery thread count
|
|
||||||
# default value is 1
|
|
||||||
# since V6.04
|
|
||||||
disk_recovery_threads = 3
|
|
||||||
|
|
||||||
# store path (disk or mount point) count, default value is 1
|
|
||||||
store_path_count = 1
|
|
||||||
|
|
||||||
# store_path#, based on 0, to configure the store paths to store files
|
|
||||||
# if store_path0 does not exist, its value is base_path (NOT recommended)
|
|
||||||
# the paths must exist.
|
|
||||||
#
|
|
||||||
# IMPORTANT NOTE:
|
|
||||||
# the store paths' order is very important, don't mess up!!!
|
|
||||||
# the base_path should be independent (different) of the store paths
|
|
||||||
|
|
||||||
store_path0 = /data/fastdfs/upload/path0
|
|
||||||
#store_path1 = /home/yuqing/fastdfs2
|
|
||||||
|
|
||||||
# subdir_count * subdir_count directories will be auto created under each
|
|
||||||
# store_path (disk), value can be 1 to 256, default value is 256
|
|
||||||
subdir_count_per_path = 256
|
|
||||||
|
|
||||||
# tracker_server can occur more than once for multiple tracker servers.
|
|
||||||
# the value format of tracker_server is "HOST:PORT",
|
|
||||||
# the HOST can be hostname or ip address,
|
|
||||||
# and the HOST can be dual IPs or hostnames separated by comma,
|
|
||||||
# the dual IPS must be an inner (intranet) IP and an outer (extranet) IP,
|
|
||||||
# or two different types of inner (intranet) IPs.
|
|
||||||
# for example: 192.168.2.100,122.244.141.46:22122
|
|
||||||
# another eg.: 192.168.1.10,172.17.4.21:22122
|
|
||||||
|
|
||||||
tracker_server = 192.168.209.121:22122
|
|
||||||
tracker_server = 192.168.209.122:22122
|
|
||||||
|
|
||||||
#standard log level as syslog, case insensitive, value list:
|
|
||||||
### emerg for emergency
|
|
||||||
### alert
|
|
||||||
### crit for critical
|
|
||||||
### error
|
|
||||||
### warn for warning
|
|
||||||
### notice
|
|
||||||
### info
|
|
||||||
### debug
|
|
||||||
log_level = info
|
|
||||||
|
|
||||||
#unix group name to run this program,
|
|
||||||
#not set (empty) means run by the group of current user
|
|
||||||
run_by_group =
|
|
||||||
|
|
||||||
#unix username to run this program,
|
|
||||||
#not set (empty) means run by current user
|
|
||||||
run_by_user =
|
|
||||||
|
|
||||||
# allow_hosts can occur more than once, host can be hostname or ip address,
|
|
||||||
# "*" (only one asterisk) means match all ip addresses
|
|
||||||
# we can use CIDR ips like 192.168.5.64/26
|
|
||||||
# and also use range like these: 10.0.1.[0-254] and host[01-08,20-25].domain.com
|
|
||||||
# for example:
|
|
||||||
# allow_hosts=10.0.1.[1-15,20]
|
|
||||||
# allow_hosts=host[01-08,20-25].domain.com
|
|
||||||
# allow_hosts=192.168.5.64/26
|
|
||||||
allow_hosts = *
|
|
||||||
|
|
||||||
# the mode of the files distributed to the data path
|
|
||||||
# 0: round robin(default)
|
|
||||||
# 1: random, distributed by hash code
|
|
||||||
file_distribute_path_mode = 0
|
|
||||||
|
|
||||||
# valid when file_distribute_path_mode is set to 0 (round robin).
|
|
||||||
# when the written file count reaches this number, then rotate to next path.
|
|
||||||
# rotate to the first path (00/00) after the last path (such as FF/FF).
|
|
||||||
# default value is 100
|
|
||||||
file_distribute_rotate_count = 100
|
|
||||||
|
|
||||||
# call fsync to disk when write big file
|
|
||||||
# 0: never call fsync
|
|
||||||
# other: call fsync when written bytes >= this bytes
|
|
||||||
# default value is 0 (never call fsync)
|
|
||||||
fsync_after_written_bytes = 0
|
|
||||||
|
|
||||||
# sync log buff to disk every interval seconds
|
|
||||||
# must > 0, default value is 10 seconds
|
|
||||||
sync_log_buff_interval = 1
|
|
||||||
|
|
||||||
# sync binlog buff / cache to disk every interval seconds
|
|
||||||
# default value is 60 seconds
|
|
||||||
sync_binlog_buff_interval = 1
|
|
||||||
|
|
||||||
# sync storage stat info to disk every interval seconds
|
|
||||||
# default value is 300 seconds
|
|
||||||
sync_stat_file_interval = 300
|
|
||||||
|
|
||||||
# thread stack size, should >= 512KB
|
|
||||||
# default value is 512KB
|
|
||||||
thread_stack_size = 512KB
|
|
||||||
|
|
||||||
# the priority as a source server for uploading file.
|
|
||||||
# the lower this value, the higher its uploading priority.
|
|
||||||
# default value is 10
|
|
||||||
upload_priority = 10
|
|
||||||
|
|
||||||
# the NIC alias prefix, such as eth in Linux, you can see it by ifconfig -a
|
|
||||||
# multi aliases split by comma. empty value means auto set by OS type
|
|
||||||
# default value is empty
|
|
||||||
if_alias_prefix =
|
|
||||||
|
|
||||||
# if check file duplicate, when set to true, use FastDHT to store file indexes
|
|
||||||
# 1 or yes: need check
|
|
||||||
# 0 or no: do not check
|
|
||||||
# default value is 0
|
|
||||||
check_file_duplicate = 0
|
|
||||||
|
|
||||||
# file signature method for check file duplicate
|
|
||||||
## hash: four 32 bits hash code
|
|
||||||
## md5: MD5 signature
|
|
||||||
# default value is hash
|
|
||||||
# since V4.01
|
|
||||||
file_signature_method = hash
|
|
||||||
|
|
||||||
# namespace for storing file indexes (key-value pairs)
|
|
||||||
# this item must be set when check_file_duplicate is true / on
|
|
||||||
key_namespace = FastDFS
|
|
||||||
|
|
||||||
# set keep_alive to 1 to enable persistent connection with FastDHT servers
|
|
||||||
# default value is 0 (short connection)
|
|
||||||
keep_alive = 0
|
|
||||||
|
|
||||||
# you can use "#include filename" (not include double quotes) directive to
|
|
||||||
# load FastDHT server list, when the filename is a relative path such as
|
|
||||||
# pure filename, the base path is the base path of current/this config file.
|
|
||||||
# must set FastDHT server list when check_file_duplicate is true / on
|
|
||||||
# please see INSTALL of FastDHT for detail
|
|
||||||
##include /home/yuqing/fastdht/conf/fdht_servers.conf
|
|
||||||
|
|
||||||
# if log to access log
|
|
||||||
# default value is false
|
|
||||||
# since V4.00
|
|
||||||
use_access_log = false
|
|
||||||
|
|
||||||
# if rotate the access log every day
|
|
||||||
# default value is false
|
|
||||||
# since V4.00
|
|
||||||
rotate_access_log = false
|
|
||||||
|
|
||||||
# rotate access log time base, time format: Hour:Minute
|
|
||||||
# Hour from 0 to 23, Minute from 0 to 59
|
|
||||||
# default value is 00:00
|
|
||||||
# since V4.00
|
|
||||||
access_log_rotate_time = 00:00
|
|
||||||
|
|
||||||
# if compress the old access log by gzip
|
|
||||||
# default value is false
|
|
||||||
# since V6.04
|
|
||||||
compress_old_access_log = false
|
|
||||||
|
|
||||||
# compress the access log days before
|
|
||||||
# default value is 1
|
|
||||||
# since V6.04
|
|
||||||
compress_access_log_days_before = 7
|
|
||||||
|
|
||||||
# if rotate the error log every day
|
|
||||||
# default value is false
|
|
||||||
# since V4.02
|
|
||||||
rotate_error_log = false
|
|
||||||
|
|
||||||
# rotate error log time base, time format: Hour:Minute
|
|
||||||
# Hour from 0 to 23, Minute from 0 to 59
|
|
||||||
# default value is 00:00
|
|
||||||
# since V4.02
|
|
||||||
error_log_rotate_time = 00:00
|
|
||||||
|
|
||||||
# if compress the old error log by gzip
|
|
||||||
# default value is false
|
|
||||||
# since V6.04
|
|
||||||
compress_old_error_log = false
|
|
||||||
|
|
||||||
# compress the error log days before
|
|
||||||
# default value is 1
|
|
||||||
# since V6.04
|
|
||||||
compress_error_log_days_before = 7
|
|
||||||
|
|
||||||
# rotate access log when the log file exceeds this size
|
|
||||||
# 0 means never rotates log file by log file size
|
|
||||||
# default value is 0
|
|
||||||
# since V4.02
|
|
||||||
rotate_access_log_size = 0
|
|
||||||
|
|
||||||
# rotate error log when the log file exceeds this size
|
|
||||||
# 0 means never rotates log file by log file size
|
|
||||||
# default value is 0
|
|
||||||
# since V4.02
|
|
||||||
rotate_error_log_size = 0
|
|
||||||
|
|
||||||
# keep days of the log files
|
|
||||||
# 0 means do not delete old log files
|
|
||||||
# default value is 0
|
|
||||||
log_file_keep_days = 0
|
|
||||||
|
|
||||||
# if skip the invalid record when sync file
|
|
||||||
# default value is false
|
|
||||||
# since V4.02
|
|
||||||
file_sync_skip_invalid_record = false
|
|
||||||
|
|
||||||
# if use connection pool
|
|
||||||
# default value is false
|
|
||||||
# since V4.05
|
|
||||||
use_connection_pool = true
|
|
||||||
|
|
||||||
# connections whose idle time exceeds this value will be closed
|
|
||||||
# unit: second
|
|
||||||
# default value is 3600
|
|
||||||
# since V4.05
|
|
||||||
connection_pool_max_idle_time = 3600
|
|
||||||
|
|
||||||
# if compress the binlog files by gzip
|
|
||||||
# default value is false
|
|
||||||
# since V6.01
|
|
||||||
compress_binlog = true
|
|
||||||
|
|
||||||
# try to compress binlog time, time format: Hour:Minute
|
|
||||||
# Hour from 0 to 23, Minute from 0 to 59
|
|
||||||
# default value is 01:30
|
|
||||||
# since V6.01
|
|
||||||
compress_binlog_time = 01:30
|
|
||||||
|
|
||||||
# if check the mark of store path to prevent confusion
|
|
||||||
# recommend to set this parameter to true
|
|
||||||
# if two storage servers (instances) MUST use a same store path for
|
|
||||||
# some specific purposes, you should set this parameter to false
|
|
||||||
# default value is true
|
|
||||||
# since V6.03
|
|
||||||
check_store_path_mark = true
|
|
||||||
|
|
||||||
# use the ip address of this storage server if domain_name is empty,
|
|
||||||
# else this domain name will occur in the url redirected by the tracker server
|
|
||||||
http.domain_name =
|
|
||||||
|
|
||||||
# the port of the web server on this storage server
|
|
||||||
http.server_port = 8888
|
|
||||||
|
|
||||||
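After the first start of fdfs_storaged with the settings above, each store path is populated with subdir_count_per_path * subdir_count_per_path = 256 * 256 = 65536 leaf directories under data/; a quick sanity check on the storage host:

ls /data/fastdfs/upload/path0/data | wc -l    # 256 first-level directories (00 .. FF)
echo $((256 * 256))                           # 65536 leaf directories per store path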
|
|
@ -1,16 +0,0 @@
|
||||||
# <id> <group_name> <ip_or_hostname[:port]>
|
|
||||||
#
|
|
||||||
# id is a natural number (1, 2, 3 etc.),
|
|
||||||
# a 6-digit id is long enough, such as 100001
|
|
||||||
#
|
|
||||||
# storage ip or hostname can be dual IPs separated by comma,
|
|
||||||
# one is an inner (intranet) IP and another is an outer (extranet) IP,
|
|
||||||
# or two different types of inner (intranet) IPs
|
|
||||||
# for example: 192.168.2.100,122.244.141.46
|
|
||||||
# another eg.: 192.168.1.10,172.17.4.21
|
|
||||||
#
|
|
||||||
# the port is optional. if you run more than one storaged instance
|
|
||||||
# on a server, you must specify the port to distinguish different instances.
|
|
||||||
|
|
||||||
#100001 group1 192.168.0.196
|
|
||||||
#100002 group1 192.168.0.197
|
|
||||||
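For completeness, a filled-in storage_ids.conf following the format described above could be written like this (ids, group names, addresses and the explicit port are examples only; the file takes effect only when use_storage_id = true in tracker.conf):

cat > /etc/fdfs/storage_ids.conf <<'EOF'
100001 group1 192.168.0.196
100002 group1 192.168.0.197,122.244.141.46
100003 group2 192.168.0.198:23001
EOF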
|
|
@ -1,329 +0,0 @@
|
||||||
# is this config file disabled
|
|
||||||
# false for enabled
|
|
||||||
# true for disabled
|
|
||||||
disabled = false
|
|
||||||
|
|
||||||
# bind an address of this host
|
|
||||||
# empty for bind all addresses of this host
|
|
||||||
bind_addr =
|
|
||||||
|
|
||||||
# the tracker server port
|
|
||||||
port = 22122
|
|
||||||
|
|
||||||
# connect timeout in seconds
|
|
||||||
# default value is 30
|
|
||||||
# Note: in the intranet network (LAN), 2 seconds is enough.
|
|
||||||
connect_timeout = 5
|
|
||||||
|
|
||||||
# network timeout in seconds for send and recv
|
|
||||||
# default value is 30
|
|
||||||
network_timeout = 60
|
|
||||||
|
|
||||||
# the base path to store data and log files
|
|
||||||
base_path = /data/fastdfs_data
|
|
||||||
|
|
||||||
# max concurrent connections this server supports
|
|
||||||
# you should set this parameter larger, eg. 10240
|
|
||||||
# default value is 256
|
|
||||||
max_connections = 1024
|
|
||||||
|
|
||||||
# accept thread count
|
|
||||||
# default value is 1 which is recommended
|
|
||||||
# since V4.07
|
|
||||||
accept_threads = 1
|
|
||||||
|
|
||||||
# work thread count
|
|
||||||
# work threads to deal network io
|
|
||||||
# default value is 4
|
|
||||||
# since V2.00
|
|
||||||
work_threads = 4
|
|
||||||
|
|
||||||
# the min network buff size
|
|
||||||
# default value 8KB
|
|
||||||
min_buff_size = 8KB
|
|
||||||
|
|
||||||
# the max network buff size
|
|
||||||
# default value 128KB
|
|
||||||
max_buff_size = 128KB
|
|
||||||
|
|
||||||
# the method for selecting group to upload files
|
|
||||||
# 0: round robin
|
|
||||||
# 1: specify group
|
|
||||||
# 2: load balance, select the max free space group to upload file
|
|
||||||
store_lookup = 2
|
|
||||||
|
|
||||||
# which group to upload file
|
|
||||||
# when store_lookup set to 1, must set store_group to the group name
|
|
||||||
store_group = group2
|
|
||||||
|
|
||||||
# which storage server to upload file
|
|
||||||
# 0: round robin (default)
|
|
||||||
# 1: the first server order by ip address
|
|
||||||
# 2: the first server order by priority (the minimal)
|
|
||||||
# Note: if use_trunk_file set to true, must set store_server to 1 or 2
|
|
||||||
store_server = 0
|
|
||||||
|
|
||||||
# which path (means disk or mount point) of the storage server to upload file
|
|
||||||
# 0: round robin
|
|
||||||
# 2: load balance, select the max free space path to upload file
|
|
||||||
store_path = 0
|
|
||||||
|
|
||||||
# which storage server to download file
|
|
||||||
# 0: round robin (default)
|
|
||||||
# 1: the source storage server which the current file uploaded to
|
|
||||||
download_server = 0
|
|
||||||
|
|
||||||
# reserved storage space for system or other applications.
|
|
||||||
# if the free(available) space of any storage server in
|
|
||||||
# a group <= reserved_storage_space, no file can be uploaded to this group.
|
|
||||||
# bytes unit can be one of follows:
|
|
||||||
### G or g for gigabyte(GB)
|
|
||||||
### M or m for megabyte(MB)
|
|
||||||
### K or k for kilobyte(KB)
|
|
||||||
### no unit for byte(B)
|
|
||||||
### XX.XX% as ratio such as: reserved_storage_space = 10%
|
|
||||||
reserved_storage_space = 20%
|
|
||||||
|
|
||||||
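A minimal sketch of the rule described above: with reserved_storage_space = 20%, uploads to a group stop as soon as any of its storage servers reports free space at or below 20% of its total (the MB figures are made up):

total_mb=1000000    # total space reported by one storage server (example)
free_mb=180000      # free space reported (example)
reserve=20          # reserved_storage_space as a percentage
if [ $(( free_mb * 100 )) -le $(( total_mb * reserve )) ]; then
    echo "group no longer accepts uploads"
else
    echo "group still accepts uploads"
fi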
#standard log level as syslog, case insensitive, value list:
|
|
||||||
### emerg for emergency
|
|
||||||
### alert
|
|
||||||
### crit for critical
|
|
||||||
### error
|
|
||||||
### warn for warning
|
|
||||||
### notice
|
|
||||||
### info
|
|
||||||
### debug
|
|
||||||
log_level = info
|
|
||||||
|
|
||||||
#unix group name to run this program,
|
|
||||||
#not set (empty) means run by the group of current user
|
|
||||||
run_by_group=
|
|
||||||
|
|
||||||
#unix username to run this program,
|
|
||||||
#not set (empty) means run by current user
|
|
||||||
run_by_user =
|
|
||||||
|
|
||||||
# allow_hosts can occur more than once, host can be hostname or ip address,
|
|
||||||
# "*" (only one asterisk) means match all ip addresses
|
|
||||||
# we can use CIDR ips like 192.168.5.64/26
|
|
||||||
# and also use range like these: 10.0.1.[0-254] and host[01-08,20-25].domain.com
|
|
||||||
# for example:
|
|
||||||
# allow_hosts=10.0.1.[1-15,20]
|
|
||||||
# allow_hosts=host[01-08,20-25].domain.com
|
|
||||||
# allow_hosts=192.168.5.64/26
|
|
||||||
allow_hosts = *
|
|
||||||
|
|
||||||
# sync log buff to disk every interval seconds
|
|
||||||
# default value is 10 seconds
|
|
||||||
sync_log_buff_interval = 1
|
|
||||||
|
|
||||||
# check storage server alive interval seconds
|
|
||||||
check_active_interval = 120
|
|
||||||
|
|
||||||
# thread stack size, should >= 64KB
|
|
||||||
# default value is 256KB
|
|
||||||
thread_stack_size = 256KB
|
|
||||||
|
|
||||||
# auto adjust when the ip address of the storage server changed
|
|
||||||
# default value is true
|
|
||||||
storage_ip_changed_auto_adjust = true
|
|
||||||
|
|
||||||
# storage sync file max delay seconds
|
|
||||||
# default value is 86400 seconds (one day)
|
|
||||||
# since V2.00
|
|
||||||
storage_sync_file_max_delay = 86400
|
|
||||||
|
|
||||||
# the max time of storage sync a file
|
|
||||||
# default value is 300 seconds
|
|
||||||
# since V2.00
|
|
||||||
storage_sync_file_max_time = 300
|
|
||||||
|
|
||||||
# if use a trunk file to store several small files
|
|
||||||
# default value is false
|
|
||||||
# since V3.00
|
|
||||||
use_trunk_file = false
|
|
||||||
|
|
||||||
# the min slot size, should <= 4KB
|
|
||||||
# default value is 256 bytes
|
|
||||||
# since V3.00
|
|
||||||
slot_min_size = 256
|
|
||||||
|
|
||||||
# the max slot size, should > slot_min_size
|
|
||||||
# store the upload file to trunk file when it's size <= this value
|
|
||||||
# default value is 16MB
|
|
||||||
# since V3.00
|
|
||||||
slot_max_size = 1MB
|
|
||||||
|
|
||||||
# the alignment size to allocate the trunk space
|
|
||||||
# default value is 0 (never align)
|
|
||||||
# since V6.05
|
|
||||||
# NOTE: the larger the alignment size, the less likely of disk
|
|
||||||
# fragmentation, but the more space is wasted.
|
|
||||||
trunk_alloc_alignment_size = 256
|
|
||||||
|
|
||||||
# if merge contiguous free spaces of trunk file
|
|
||||||
# default value is false
|
|
||||||
# since V6.05
|
|
||||||
trunk_free_space_merge = true
|
|
||||||
|
|
||||||
# if delete / reclaim the unused trunk files
|
|
||||||
# default value is false
|
|
||||||
# since V6.05
|
|
||||||
delete_unused_trunk_files = false
|
|
||||||
|
|
||||||
# the trunk file size, should >= 4MB
|
|
||||||
# default value is 64MB
|
|
||||||
# since V3.00
|
|
||||||
trunk_file_size = 64MB
|
|
||||||
|
|
||||||
# whether to create trunk files in advance
|
|
||||||
# default value is false
|
|
||||||
# since V3.06
|
|
||||||
trunk_create_file_advance = false
|
|
||||||
|
|
||||||
# the time base to create trunk file
|
|
||||||
# the time format: HH:MM
|
|
||||||
# default value is 02:00
|
|
||||||
# since V3.06
|
|
||||||
trunk_create_file_time_base = 02:00
|
|
||||||
|
|
||||||
# the interval of create trunk file, unit: second
|
|
||||||
# default value is 86400 (one day)
|
|
||||||
# since V3.06
|
|
||||||
trunk_create_file_interval = 86400
|
|
||||||
|
|
||||||
# the threshold to create trunk file
|
|
||||||
# when the free trunk file size is less than the threshold,
|
|
||||||
# will create the trunk files
|
|
||||||
# default value is 0
|
|
||||||
# since V3.06
|
|
||||||
trunk_create_file_space_threshold = 20G
|
|
||||||
|
|
||||||
# if check trunk space occupying when loading trunk free spaces
|
|
||||||
# the occupied spaces will be ignored
|
|
||||||
# default value is false
|
|
||||||
# since V3.09
|
|
||||||
# NOTICE: setting this parameter to true will slow the loading of trunk spaces
|
|
||||||
# at startup. set this parameter to true only when necessary.
|
|
||||||
trunk_init_check_occupying = false
|
|
||||||
|
|
||||||
# if ignore storage_trunk.dat, reload from trunk binlog
|
|
||||||
# default value is false
|
|
||||||
# since V3.10
|
|
||||||
# set to true once when upgrading from a version earlier than V3.10
|
|
||||||
trunk_init_reload_from_binlog = false
|
|
||||||
|
|
||||||
# the min interval for compressing the trunk binlog file
|
|
||||||
# unit: second, 0 means never compress
|
|
||||||
# FastDFS compress the trunk binlog when trunk init and trunk destroy
|
|
||||||
# it is recommended to set this parameter to 86400 (one day)
|
|
||||||
# default value is 0
|
|
||||||
# since V5.01
|
|
||||||
trunk_compress_binlog_min_interval = 86400
|
|
||||||
|
|
||||||
# the interval for compressing the trunk binlog file
|
|
||||||
# unit: second, 0 means never compress
|
|
||||||
# it is recommended to set this parameter to 86400 (one day)
|
|
||||||
# default value is 0
|
|
||||||
# since V6.05
|
|
||||||
trunk_compress_binlog_interval = 86400
|
|
||||||
|
|
||||||
# compress the trunk binlog time base, time format: Hour:Minute
|
|
||||||
# Hour from 0 to 23, Minute from 0 to 59
|
|
||||||
# default value is 03:00
|
|
||||||
# since V6.05
|
|
||||||
trunk_compress_binlog_time_base = 03:00
|
|
||||||
|
|
||||||
# max backups for the trunk binlog file
|
|
||||||
# default value is 0 (never backup)
|
|
||||||
# since V6.05
|
|
||||||
trunk_binlog_max_backups = 7
|
|
||||||
|
|
||||||
# if use storage server ID instead of IP address
|
|
||||||
# if you want to use dual IPs for storage server, you MUST set
|
|
||||||
# this parameter to true, and configure the dual IPs in the file
|
|
||||||
# configured by following item "storage_ids_filename", such as storage_ids.conf
|
|
||||||
# default value is false
|
|
||||||
# since V4.00
|
|
||||||
use_storage_id = false
|
|
||||||
|
|
||||||
# specify storage ids filename, can use relative or absolute path
|
|
||||||
# this parameter is valid only when use_storage_id set to true
|
|
||||||
# since V4.00
|
|
||||||
storage_ids_filename = storage_ids.conf
|
|
||||||
|
|
||||||
# id type of the storage server in the filename, values are:
|
|
||||||
## ip: the ip address of the storage server
|
|
||||||
## id: the server id of the storage server
|
|
||||||
# this parameter is valid only when use_storage_id is set to true
|
|
||||||
# default value is ip
|
|
||||||
# since V4.03
|
|
||||||
id_type_in_filename = id
|
|
||||||
|
|
||||||
# if store slave file use symbol link
|
|
||||||
# default value is false
|
|
||||||
# since V4.01
|
|
||||||
store_slave_file_use_link = false
|
|
||||||
|
|
||||||
# if rotate the error log every day
|
|
||||||
# default value is false
|
|
||||||
# since V4.02
|
|
||||||
rotate_error_log = false
|
|
||||||
|
|
||||||
# rotate error log time base, time format: Hour:Minute
|
|
||||||
# Hour from 0 to 23, Minute from 0 to 59
|
|
||||||
# default value is 00:00
|
|
||||||
# since V4.02
|
|
||||||
error_log_rotate_time = 00:00
|
|
||||||
|
|
||||||
# if compress the old error log by gzip
|
|
||||||
# default value is false
|
|
||||||
# since V6.04
|
|
||||||
compress_old_error_log = false
|
|
||||||
|
|
||||||
# compress the error log days before
|
|
||||||
# default value is 1
|
|
||||||
# since V6.04
|
|
||||||
compress_error_log_days_before = 7
|
|
||||||
|
|
||||||
# rotate error log when the log file exceeds this size
|
|
||||||
# 0 means never rotates log file by log file size
|
|
||||||
# default value is 0
|
|
||||||
# since V4.02
|
|
||||||
rotate_error_log_size = 0
|
|
||||||
|
|
||||||
# keep days of the log files
|
|
||||||
# 0 means do not delete old log files
|
|
||||||
# default value is 0
|
|
||||||
log_file_keep_days = 0
|
|
||||||
|
|
||||||
# if use connection pool
|
|
||||||
# default value is false
|
|
||||||
# since V4.05
|
|
||||||
use_connection_pool = true
|
|
||||||
|
|
||||||
# connections whose idle time exceeds this value will be closed
|
|
||||||
# unit: second
|
|
||||||
# default value is 3600
|
|
||||||
# since V4.05
|
|
||||||
connection_pool_max_idle_time = 3600
|
|
||||||
|
|
||||||
# HTTP port on this tracker server
|
|
||||||
http.server_port = 8080
|
|
||||||
|
|
||||||
# check storage HTTP server alive interval seconds
|
|
||||||
# <= 0 for never check
|
|
||||||
# default value is 30
|
|
||||||
http.check_alive_interval = 30
|
|
||||||
|
|
||||||
# check storage HTTP server alive type, values are:
|
|
||||||
# tcp : connect to the storage server's HTTP port only,
|
|
||||||
# do not request and get response
|
|
||||||
# http: storage check alive url must return http status 200
|
|
||||||
# default value is tcp
|
|
||||||
http.check_alive_type = tcp
|
|
||||||
|
|
||||||
# check storage HTTP server alive uri/url
|
|
||||||
# NOTE: the storage embedded HTTP server supports the uri: /status.html
|
|
||||||
http.check_alive_uri = /status.html
|
|
||||||
|
|
||||||
|
|
@ -1,36 +0,0 @@
|
||||||
#http server
|
|
||||||
#
|
|
||||||
|
|
||||||
server {
|
|
||||||
listen 9088;
|
|
||||||
server_name localhost;
|
|
||||||
|
|
||||||
#open() “/usr/local/nginx/html/favicon.ico” failed (2: No such file or directory) -- just silence it with the location below
|
|
||||||
location = /favicon.ico {
|
|
||||||
log_not_found off;
|
|
||||||
access_log off;
|
|
||||||
}
|
|
||||||
|
|
||||||
#reverse proxy HTTP file access requests to the fastdfs module, without logging the requests
|
|
||||||
location ~/group[0-9]/ {
|
|
||||||
ngx_fastdfs_module;
|
|
||||||
|
|
||||||
log_not_found off;
|
|
||||||
access_log off;
|
|
||||||
}
|
|
||||||
|
|
||||||
# location ~ /group1/M00 {
|
|
||||||
# alias /data/fastdfs/upload/path0;
|
|
||||||
# ngx_fastdfs_module;
|
|
||||||
# }
|
|
||||||
|
|
||||||
# location ~ /group1/M01 {
|
|
||||||
# alias /data/fastdfs/upload/path1;
|
|
||||||
# ngx_fastdfs_module;
|
|
||||||
# }
|
|
||||||
|
|
||||||
error_page 500 502 503 504 /50x.html;
|
|
||||||
location = /50x.html {
|
|
||||||
root html;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,33 +0,0 @@
|
||||||
worker_processes 1;
|
|
||||||
worker_rlimit_nofile 65535; #be sure to raise the server's max open files limit first.
|
|
||||||
|
|
||||||
error_log /data/fastdfs_data/logs/nginx-error.log;
|
|
||||||
|
|
||||||
events {
|
|
||||||
use epoll; #on Linux 2.6+ servers you should use epoll.
|
|
||||||
worker_connections 65535;
|
|
||||||
}
|
|
||||||
|
|
||||||
http {
|
|
||||||
include mime.types;
|
|
||||||
default_type application/octet-stream;
|
|
||||||
|
|
||||||
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
|
|
||||||
'$status $body_bytes_sent "$http_referer" '
|
|
||||||
'"$http_user_agent" "$http_x_forwarded_for"';
|
|
||||||
|
|
||||||
access_log /data/fastdfs_data/logs/nginx-access.log main;
|
|
||||||
sendfile on;
|
|
||||||
keepalive_timeout 65;
|
|
||||||
|
|
||||||
gzip on;
|
|
||||||
gzip_min_length 2k;
|
|
||||||
gzip_buffers 8 32k;
|
|
||||||
gzip_http_version 1.1;
|
|
||||||
gzip_comp_level 2;
|
|
||||||
gzip_types text/plain text/css text/javascript application/json application/javascript application/x-javascript application/xml;
|
|
||||||
gzip_vary on;
|
|
||||||
|
|
||||||
include /usr/local/nginx/conf.d/*.conf;
|
|
||||||
|
|
||||||
}
|
|
||||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
|
@ -1,55 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
|
|
||||||
# storage paths used by the fastdfs config files; they must be created in advance
|
|
||||||
FASTDFS_BASE_PATH=/data/fastdfs_data \
|
|
||||||
FASTDFS_STORE_PATH=/data/fastdfs/upload \
|
|
||||||
|
|
||||||
# startup argument
|
|
||||||
# - tracker : start the tracker_server service
|
|
||||||
# - storage : start the storage service
|
|
||||||
start_parameter=$1
|
|
||||||
|
|
||||||
if [ ! -d "$FASTDFS_BASE_PATH" ]; then
|
|
||||||
mkdir -p ${FASTDFS_BASE_PATH};
|
|
||||||
fi
|
|
||||||
|
|
||||||
function start_tracker(){
|
|
||||||
|
|
||||||
/usr/bin/fdfs_trackerd /etc/fdfs/tracker.conf
|
|
||||||
tail -f /data/fastdfs_data/logs/trackerd.log
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
function start_storage(){
|
|
||||||
if [ ! -d "$FASTDFS_STORE_PATH" ]; then
|
|
||||||
mkdir -p ${FASTDFS_STORE_PATH}/{path0,path1,path2,path3};
|
|
||||||
fi
|
|
||||||
/usr/bin/fdfs_storaged /etc/fdfs/storage.conf;
|
|
||||||
sleep 5
|
|
||||||
|
|
||||||
# the nginx log directory is /data/fastdfs_data/logs/; create it manually in case storage starts slowly and has not created the logs directory yet
|
|
||||||
if [ ! -d "$FASTDFS_BASE_PATH/logs" ]; then
|
|
||||||
mkdir -p ${FASTDFS_BASE_PATH}/logs;
|
|
||||||
fi
|
|
||||||
|
|
||||||
/usr/local/nginx/sbin/nginx;
|
|
||||||
tail -f /data/fastdfs_data/logs/storaged.log;
|
|
||||||
}
|
|
||||||
|
|
||||||
function run (){
|
|
||||||
|
|
||||||
case ${start_parameter} in
|
|
||||||
tracker)
|
|
||||||
echo "启动tracker"
|
|
||||||
start_tracker
|
|
||||||
;;
|
|
||||||
storage)
|
|
||||||
echo "启动storage"
|
|
||||||
start_storage
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo "请指定要启动哪个服务,tracker还是storage(二选一),传参为tracker | storage"
|
|
||||||
esac
|
|
||||||
}
|
|
||||||
|
|
||||||
run
|
|
||||||
Binary file not shown.
|
|
|
@ -1,71 +0,0 @@
|
||||||
# connect timeout in seconds
|
|
||||||
# default value is 30s
|
|
||||||
# Note: in the intranet network (LAN), 2 seconds is enough.
|
|
||||||
connect_timeout = 5
|
|
||||||
|
|
||||||
# network timeout in seconds
|
|
||||||
# default value is 30s
|
|
||||||
network_timeout = 60
|
|
||||||
|
|
||||||
# the base path to store log files
|
|
||||||
base_path = /data/fastdfs_data
|
|
||||||
|
|
||||||
# tracker_server can occur more than once for multiple tracker servers.
|
|
||||||
# the value format of tracker_server is "HOST:PORT",
|
|
||||||
# the HOST can be hostname or ip address,
|
|
||||||
# and the HOST can be dual IPs or hostnames separated by comma,
|
|
||||||
# the dual IPS must be an inner (intranet) IP and an outer (extranet) IP,
|
|
||||||
# or two different types of inner (intranet) IPs.
|
|
||||||
# for example: 192.168.2.100,122.244.141.46:22122
|
|
||||||
# another eg.: 192.168.1.10,172.17.4.21:22122
|
|
||||||
|
|
||||||
tracker_server = 192.168.0.196:22122
|
|
||||||
tracker_server = 192.168.0.197:22122
|
|
||||||
|
|
||||||
#standard log level as syslog, case insensitive, value list:
|
|
||||||
### emerg for emergency
|
|
||||||
### alert
|
|
||||||
### crit for critical
|
|
||||||
### error
|
|
||||||
### warn for warning
|
|
||||||
### notice
|
|
||||||
### info
|
|
||||||
### debug
|
|
||||||
log_level = info
|
|
||||||
|
|
||||||
# if use connection pool
|
|
||||||
# default value is false
|
|
||||||
# since V4.05
|
|
||||||
use_connection_pool = false
|
|
||||||
|
|
||||||
# connections whose idle time exceeds this value will be closed
|
|
||||||
# unit: second
|
|
||||||
# default value is 3600
|
|
||||||
# since V4.05
|
|
||||||
connection_pool_max_idle_time = 3600
|
|
||||||
|
|
||||||
# if load FastDFS parameters from tracker server
|
|
||||||
# since V4.05
|
|
||||||
# default value is false
|
|
||||||
load_fdfs_parameters_from_tracker = false
|
|
||||||
|
|
||||||
# if use storage ID instead of IP address
|
|
||||||
# same as tracker.conf
|
|
||||||
# valid only when load_fdfs_parameters_from_tracker is false
|
|
||||||
# default value is false
|
|
||||||
# since V4.05
|
|
||||||
use_storage_id = false
|
|
||||||
|
|
||||||
# specify storage ids filename, can use relative or absolute path
|
|
||||||
# same as tracker.conf
|
|
||||||
# valid only when load_fdfs_parameters_from_tracker is false
|
|
||||||
# since V4.05
|
|
||||||
storage_ids_filename = storage_ids.conf
|
|
||||||
|
|
||||||
|
|
||||||
#HTTP settings
|
|
||||||
http.tracker_server_port = 80
|
|
||||||
|
|
||||||
#use "#include" directive to include HTTP other settiongs
|
|
||||||
##include http.conf
|
|
||||||
|
|
||||||
|
|
@ -1,29 +0,0 @@
|
||||||
# HTTP default content type
|
|
||||||
http.default_content_type = application/octet-stream
|
|
||||||
|
|
||||||
# MIME types mapping filename
|
|
||||||
# MIME types file format: MIME_type extensions
|
|
||||||
# such as: image/jpeg jpeg jpg jpe
|
|
||||||
# you can use apache's MIME file: mime.types
|
|
||||||
http.mime_types_filename = mime.types
|
|
||||||
|
|
||||||
# if use token to anti-steal
|
|
||||||
# default value is false (0)
|
|
||||||
http.anti_steal.check_token = false
|
|
||||||
|
|
||||||
# token TTL (time to live), seconds
|
|
||||||
# default value is 600
|
|
||||||
http.anti_steal.token_ttl = 900
|
|
||||||
|
|
||||||
# secret key to generate anti-steal token
|
|
||||||
# this parameter must be set when http.anti_steal.check_token set to true
|
|
||||||
# the length of the secret key should not exceed 128 bytes
|
|
||||||
http.anti_steal.secret_key = FastDFS1234567890
|
|
||||||
|
|
||||||
# return the content of the file when check token fail
|
|
||||||
# default value is empty (no file sepecified)
|
|
||||||
http.anti_steal.token_check_fail = /home/yuqing/fastdfs/conf/anti-steal.jpg
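
For reference, a sketch of how a download URL can be signed when http.anti_steal.check_token is enabled. It assumes the token scheme commonly described for fastdfs-nginx-module, md5 of the file URI without the group name, then the secret key, then the unix timestamp, in that order; verify against your module version before relying on it.

```bash
#!/bin/sh
# hedged sketch: sign a FastDFS download URL for the anti-steal check
secret_key="FastDFS1234567890"                            # must equal http.anti_steal.secret_key
file_uri="M00/00/00/oYYBAGMi4zGAYNoxABY-esN9nNw502.jpg"   # file id without the group name
ts=$(date +%s)
token=$(printf '%s%s%s' "$file_uri" "$secret_key" "$ts" | md5sum | awk '{print $1}')
echo "http://storage-host:8888/group1/${file_uri}?token=${token}&ts=${ts}"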
|
|
||||||
|
|
||||||
# if support multi regions for HTTP Range
|
|
||||||
# default value is true
|
|
||||||
http.multi_range.enabed = true
|
|
||||||
File diff suppressed because it is too large
Load Diff
|
|
@ -1,137 +0,0 @@
|
||||||
# connect timeout in seconds
|
|
||||||
# default value is 30s
|
|
||||||
connect_timeout=15
|
|
||||||
|
|
||||||
# network recv and send timeout in seconds
|
|
||||||
# default value is 30s
|
|
||||||
network_timeout=30
|
|
||||||
|
|
||||||
# the base path to store log files
|
|
||||||
base_path=/data/fastdfs_data
|
|
||||||
|
|
||||||
# if load FastDFS parameters from tracker server
|
|
||||||
# since V1.12
|
|
||||||
# default value is false
|
|
||||||
load_fdfs_parameters_from_tracker=true
|
|
||||||
|
|
||||||
# storage sync file max delay seconds
|
|
||||||
# same as tracker.conf
|
|
||||||
# valid only when load_fdfs_parameters_from_tracker is false
|
|
||||||
# since V1.12
|
|
||||||
# default value is 86400 seconds (one day)
|
|
||||||
storage_sync_file_max_delay = 86400
|
|
||||||
|
|
||||||
# if use storage ID instead of IP address
|
|
||||||
# same as tracker.conf
|
|
||||||
# valid only when load_fdfs_parameters_from_tracker is false
|
|
||||||
# default value is false
|
|
||||||
# since V1.13
|
|
||||||
use_storage_id = false
|
|
||||||
|
|
||||||
# specify storage ids filename, can use relative or absolute path
|
|
||||||
# same as tracker.conf
|
|
||||||
# valid only when load_fdfs_parameters_from_tracker is false
|
|
||||||
# since V1.13
|
|
||||||
storage_ids_filename = storage_ids.conf
|
|
||||||
|
|
||||||
# FastDFS tracker_server can ocur more than once, and tracker_server format is
|
|
||||||
# "host:port", host can be hostname or ip address
|
|
||||||
# valid only when load_fdfs_parameters_from_tracker is true
|
|
||||||
tracker_server = 192.168.209.121:22122
|
|
||||||
tracker_server = 192.168.209.122:22122
|
|
||||||
|
|
||||||
# the port of the local storage server
|
|
||||||
# the default value is 23000
|
|
||||||
storage_server_port=23000
|
|
||||||
|
|
||||||
# the group name of the local storage server
|
|
||||||
group_name=group1
|
|
||||||
|
|
||||||
# if the url / uri including the group name
|
|
||||||
# set to false when uri like /M00/00/00/xxx
|
|
||||||
# set to true when uri like ${group_name}/M00/00/00/xxx, such as group1/M00/xxx
|
|
||||||
# default value is false
|
|
||||||
url_have_group_name = true
|
|
||||||
|
|
||||||
# path(disk or mount point) count, default value is 1
|
|
||||||
# must same as storage.conf
|
|
||||||
store_path_count=1
|
|
||||||
|
|
||||||
# store_path#, based 0, if store_path0 not exists, it's value is base_path
|
|
||||||
# the paths must be exist
|
|
||||||
# must same as storage.conf
|
|
||||||
store_path0=/data/fastdfs/upload/path0
|
|
||||||
#store_path1=/home/yuqing/fastdfs1
|
|
||||||
|
|
||||||
# standard log level as syslog, case insensitive, value list:
|
|
||||||
### emerg for emergency
|
|
||||||
### alert
|
|
||||||
### crit for critical
|
|
||||||
### error
|
|
||||||
### warn for warning
|
|
||||||
### notice
|
|
||||||
### info
|
|
||||||
### debug
|
|
||||||
log_level=info
|
|
||||||
|
|
||||||
# set the log filename, such as /usr/local/apache2/logs/mod_fastdfs.log
|
|
||||||
# empty for output to stderr (apache and nginx error_log file)
|
|
||||||
log_filename=
|
|
||||||
|
|
||||||
# response mode when the file not exist in the local file system
|
|
||||||
## proxy: get the content from other storage server, then send to client
|
|
||||||
## redirect: redirect to the original storage server (HTTP Header is Location)
|
|
||||||
response_mode=proxy
|
|
||||||
|
|
||||||
# the NIC alias prefix, such as eth in Linux, you can see it by ifconfig -a
|
|
||||||
# multi aliases split by comma. empty value means auto set by OS type
|
|
||||||
# this paramter used to get all ip address of the local host
|
|
||||||
# default values is empty
|
|
||||||
if_alias_prefix=
|
|
||||||
|
|
||||||
# use "#include" directive to include HTTP config file
|
|
||||||
# NOTE: #include is an include directive, do NOT remove the # before include
|
|
||||||
#include http.conf
|
|
||||||
|
|
||||||
|
|
||||||
# if support flv
|
|
||||||
# default value is false
|
|
||||||
# since v1.15
|
|
||||||
flv_support = true
|
|
||||||
|
|
||||||
# flv file extension name
|
|
||||||
# default value is flv
|
|
||||||
# since v1.15
|
|
||||||
flv_extension = flv
|
|
||||||
|
|
||||||
|
|
||||||
## if this storage server serves multiple groups, set group_count to the number of groups; 0 for a single group.
## one server does not need to run storages of multiple groups, because a single storage already supports multiple store paths
|
|
||||||
# set the group count
|
|
||||||
# set to none zero to support multi-group on this storage server
|
|
||||||
# set to 0 for single group only
|
|
||||||
# groups settings section as [group1], [group2], ..., [groupN]
|
|
||||||
# default value is 0
|
|
||||||
# since v1.14
|
|
||||||
group_count = 0
|
|
||||||
|
|
||||||
## when this storage server serves multiple groups, set as many group sections as there are groups
|
|
||||||
# group settings for group #1
|
|
||||||
# since v1.14
|
|
||||||
# when support multi-group on this storage server, uncomment following section
|
|
||||||
#[group1]
|
|
||||||
#group_name=group1
|
|
||||||
#storage_server_port=23000
|
|
||||||
#store_path_count=2
|
|
||||||
#store_path0=/home/yuqing/fastdfs
|
|
||||||
#store_path1=/home/yuqing/fastdfs1
|
|
||||||
|
|
||||||
# group settings for group #2
|
|
||||||
# since v1.14
|
|
||||||
# when support multi-group, uncomment following section as neccessary
|
|
||||||
#[group2]
|
|
||||||
#group_name=group2
|
|
||||||
#storage_server_port=23000
|
|
||||||
#store_path_count=1
|
|
||||||
#store_path0=/home/yuqing/fastdfs
|
|
||||||
|
|
||||||
|
|
@ -1,353 +0,0 @@
|
||||||
# is this config file disabled
|
|
||||||
# false for enabled
|
|
||||||
# true for disabled
|
|
||||||
disabled = false
|
|
||||||
|
|
||||||
# the name of the group this storage server belongs to
|
|
||||||
#
|
|
||||||
# comment or remove this item for fetching from tracker server,
|
|
||||||
# in this case, use_storage_id must set to true in tracker.conf,
|
|
||||||
# and storage_ids.conf must be configured correctly.
|
|
||||||
group_name = group1
|
|
||||||
|
|
||||||
# bind an address of this host
|
|
||||||
# empty for bind all addresses of this host
|
|
||||||
bind_addr =
|
|
||||||
|
|
||||||
# if bind an address of this host when connect to other servers
|
|
||||||
# (this storage server as a client)
|
|
||||||
# true for binding the address configured by the above parameter: "bind_addr"
|
|
||||||
# false for binding any address of this host
|
|
||||||
client_bind = true
|
|
||||||
|
|
||||||
# the storage server port
|
|
||||||
port = 23000
|
|
||||||
|
|
||||||
# connect timeout in seconds
|
|
||||||
# default value is 30
|
|
||||||
# Note: in the intranet network (LAN), 2 seconds is enough.
|
|
||||||
connect_timeout = 5
|
|
||||||
|
|
||||||
# network timeout in seconds for send and recv
|
|
||||||
# default value is 30
|
|
||||||
network_timeout = 60
|
|
||||||
|
|
||||||
# the heart beat interval in seconds
|
|
||||||
# the storage server send heartbeat to tracker server periodically
|
|
||||||
# default value is 30
|
|
||||||
heart_beat_interval = 30
|
|
||||||
|
|
||||||
# disk usage report interval in seconds
|
|
||||||
# the storage server send disk usage report to tracker server periodically
|
|
||||||
# default value is 300
|
|
||||||
stat_report_interval = 60
|
|
||||||
|
|
||||||
# the base path to store data and log files
|
|
||||||
# NOTE: the binlog files maybe are large, make sure
|
|
||||||
# the base path has enough disk space,
|
|
||||||
# eg. the disk free space should > 50GB
|
|
||||||
base_path = /data/fastdfs_data
|
|
||||||
|
|
||||||
# max concurrent connections the server supported,
|
|
||||||
# you should set this parameter larger, eg. 10240
|
|
||||||
# default value is 256
|
|
||||||
max_connections = 1024
|
|
||||||
|
|
||||||
# the buff size to recv / send data from/to network
|
|
||||||
# this parameter must more than 8KB
|
|
||||||
# 256KB or 512KB is recommended
|
|
||||||
# default value is 64KB
|
|
||||||
# since V2.00
|
|
||||||
buff_size = 256KB
|
|
||||||
|
|
||||||
# accept thread count
|
|
||||||
# default value is 1 which is recommended
|
|
||||||
# since V4.07
|
|
||||||
accept_threads = 1
|
|
||||||
|
|
||||||
# work thread count
|
|
||||||
# work threads to deal network io
|
|
||||||
# default value is 4
|
|
||||||
# since V2.00
|
|
||||||
work_threads = 4
|
|
||||||
|
|
||||||
# if disk read / write separated
|
|
||||||
## false for mixed read and write
|
|
||||||
## true for separated read and write
|
|
||||||
# default value is true
|
|
||||||
# since V2.00
|
|
||||||
disk_rw_separated = true
|
|
||||||
|
|
||||||
# disk reader thread count per store path
|
|
||||||
# for mixed read / write, this parameter can be 0
|
|
||||||
# default value is 1
|
|
||||||
# since V2.00
|
|
||||||
disk_reader_threads = 1
|
|
||||||
|
|
||||||
# disk writer thread count per store path
|
|
||||||
# for mixed read / write, this parameter can be 0
|
|
||||||
# default value is 1
|
|
||||||
# since V2.00
|
|
||||||
disk_writer_threads = 1
|
|
||||||
|
|
||||||
# when no entry to sync, try read binlog again after X milliseconds
|
|
||||||
# must > 0, default value is 200ms
|
|
||||||
sync_wait_msec = 50
|
|
||||||
|
|
||||||
# after sync a file, usleep milliseconds
|
|
||||||
# 0 for sync successively (never call usleep)
|
|
||||||
sync_interval = 0
|
|
||||||
|
|
||||||
# storage sync start time of a day, time format: Hour:Minute
|
|
||||||
# Hour from 0 to 23, Minute from 0 to 59
|
|
||||||
sync_start_time = 00:00
|
|
||||||
|
|
||||||
# storage sync end time of a day, time format: Hour:Minute
|
|
||||||
# Hour from 0 to 23, Minute from 0 to 59
|
|
||||||
sync_end_time = 23:59
|
|
||||||
|
|
||||||
# write to the mark file after sync N files
|
|
||||||
# default value is 500
|
|
||||||
write_mark_file_freq = 500
|
|
||||||
|
|
||||||
# disk recovery thread count
|
|
||||||
# default value is 1
|
|
||||||
# since V6.04
|
|
||||||
disk_recovery_threads = 3
|
|
||||||
|
|
||||||
# store path (disk or mount point) count, default value is 1
|
|
||||||
store_path_count = 1
|
|
||||||
|
|
||||||
# store_path#, based on 0, to configure the store paths to store files
|
|
||||||
# if store_path0 not exists, it's value is base_path (NOT recommended)
|
|
||||||
# the paths must be exist.
|
|
||||||
#
|
|
||||||
# IMPORTANT NOTE:
|
|
||||||
# the store paths' order is very important, don't mess up!!!
|
|
||||||
# the base_path should be independent (different) of the store paths
|
|
||||||
|
|
||||||
store_path0 = /data/fastdfs/upload/path0
|
|
||||||
#store_path1 = /home/yuqing/fastdfs2
|
|
||||||
|
|
||||||
# subdir_count * subdir_count directories will be auto created under each
|
|
||||||
# store_path (disk), value can be 1 to 256, default value is 256
|
|
||||||
subdir_count_per_path = 256
|
|
||||||
|
|
||||||
# tracker_server can ocur more than once for multi tracker servers.
|
|
||||||
# the value format of tracker_server is "HOST:PORT",
|
|
||||||
# the HOST can be hostname or ip address,
|
|
||||||
# and the HOST can be dual IPs or hostnames seperated by comma,
|
|
||||||
# the dual IPS must be an inner (intranet) IP and an outer (extranet) IP,
|
|
||||||
# or two different types of inner (intranet) IPs.
|
|
||||||
# for example: 192.168.2.100,122.244.141.46:22122
|
|
||||||
# another eg.: 192.168.1.10,172.17.4.21:22122
|
|
||||||
|
|
||||||
tracker_server = 192.168.209.121:22122
|
|
||||||
tracker_server = 192.168.209.122:22122
|
|
||||||
|
|
||||||
#standard log level as syslog, case insensitive, value list:
|
|
||||||
### emerg for emergency
|
|
||||||
### alert
|
|
||||||
### crit for critical
|
|
||||||
### error
|
|
||||||
### warn for warning
|
|
||||||
### notice
|
|
||||||
### info
|
|
||||||
### debug
|
|
||||||
log_level = info
|
|
||||||
|
|
||||||
#unix group name to run this program,
|
|
||||||
#not set (empty) means run by the group of current user
|
|
||||||
run_by_group =
|
|
||||||
|
|
||||||
#unix username to run this program,
|
|
||||||
#not set (empty) means run by current user
|
|
||||||
run_by_user =
|
|
||||||
|
|
||||||
# allow_hosts can ocur more than once, host can be hostname or ip address,
|
|
||||||
# "*" (only one asterisk) means match all ip addresses
|
|
||||||
# we can use CIDR ips like 192.168.5.64/26
|
|
||||||
# and also use range like these: 10.0.1.[0-254] and host[01-08,20-25].domain.com
|
|
||||||
# for example:
|
|
||||||
# allow_hosts=10.0.1.[1-15,20]
|
|
||||||
# allow_hosts=host[01-08,20-25].domain.com
|
|
||||||
# allow_hosts=192.168.5.64/26
|
|
||||||
allow_hosts = *
|
|
||||||
|
|
||||||
# the mode of the files distributed to the data path
|
|
||||||
# 0: round robin(default)
|
|
||||||
# 1: random, distributed by hash code
|
|
||||||
file_distribute_path_mode = 0
|
|
||||||
|
|
||||||
# valid when file_distribute_to_path is set to 0 (round robin).
|
|
||||||
# when the written file count reaches this number, then rotate to next path.
|
|
||||||
# rotate to the first path (00/00) after the last path (such as FF/FF).
|
|
||||||
# default value is 100
|
|
||||||
file_distribute_rotate_count = 100
|
|
||||||
|
|
||||||
# call fsync to disk when write big file
|
|
||||||
# 0: never call fsync
|
|
||||||
# other: call fsync when written bytes >= this bytes
|
|
||||||
# default value is 0 (never call fsync)
|
|
||||||
fsync_after_written_bytes = 0
|
|
||||||
|
|
||||||
# sync log buff to disk every interval seconds
|
|
||||||
# must > 0, default value is 10 seconds
|
|
||||||
sync_log_buff_interval = 1
|
|
||||||
|
|
||||||
# sync binlog buff / cache to disk every interval seconds
|
|
||||||
# default value is 60 seconds
|
|
||||||
sync_binlog_buff_interval = 1
|
|
||||||
|
|
||||||
# sync storage stat info to disk every interval seconds
|
|
||||||
# default value is 300 seconds
|
|
||||||
sync_stat_file_interval = 300
|
|
||||||
|
|
||||||
# thread stack size, should >= 512KB
|
|
||||||
# default value is 512KB
|
|
||||||
thread_stack_size = 512KB
|
|
||||||
|
|
||||||
# the priority as a source server for uploading file.
|
|
||||||
# the lower this value, the higher its uploading priority.
|
|
||||||
# default value is 10
|
|
||||||
upload_priority = 10
|
|
||||||
|
|
||||||
# the NIC alias prefix, such as eth in Linux, you can see it by ifconfig -a
|
|
||||||
# multi aliases split by comma. empty value means auto set by OS type
|
|
||||||
# default values is empty
|
|
||||||
if_alias_prefix =
|
|
||||||
|
|
||||||
# if check file duplicate, when set to true, use FastDHT to store file indexes
|
|
||||||
# 1 or yes: need check
|
|
||||||
# 0 or no: do not check
|
|
||||||
# default value is 0
|
|
||||||
check_file_duplicate = 0
|
|
||||||
|
|
||||||
# file signature method for check file duplicate
|
|
||||||
## hash: four 32 bits hash code
|
|
||||||
## md5: MD5 signature
|
|
||||||
# default value is hash
|
|
||||||
# since V4.01
|
|
||||||
file_signature_method = hash
|
|
||||||
|
|
||||||
# namespace for storing file indexes (key-value pairs)
|
|
||||||
# this item must be set when check_file_duplicate is true / on
|
|
||||||
key_namespace = FastDFS
|
|
||||||
|
|
||||||
# set keep_alive to 1 to enable persistent connection with FastDHT servers
|
|
||||||
# default value is 0 (short connection)
|
|
||||||
keep_alive = 0
|
|
||||||
|
|
||||||
# you can use "#include filename" (not include double quotes) directive to
|
|
||||||
# load FastDHT server list, when the filename is a relative path such as
|
|
||||||
# pure filename, the base path is the base path of current/this config file.
|
|
||||||
# must set FastDHT server list when check_file_duplicate is true / on
|
|
||||||
# please see INSTALL of FastDHT for detail
|
|
||||||
##include /home/yuqing/fastdht/conf/fdht_servers.conf
|
|
||||||
|
|
||||||
# if log to access log
|
|
||||||
# default value is false
|
|
||||||
# since V4.00
|
|
||||||
use_access_log = false
|
|
||||||
|
|
||||||
# if rotate the access log every day
|
|
||||||
# default value is false
|
|
||||||
# since V4.00
|
|
||||||
rotate_access_log = false
|
|
||||||
|
|
||||||
# rotate access log time base, time format: Hour:Minute
|
|
||||||
# Hour from 0 to 23, Minute from 0 to 59
|
|
||||||
# default value is 00:00
|
|
||||||
# since V4.00
|
|
||||||
access_log_rotate_time = 00:00
|
|
||||||
|
|
||||||
# if compress the old access log by gzip
|
|
||||||
# default value is false
|
|
||||||
# since V6.04
|
|
||||||
compress_old_access_log = false
|
|
||||||
|
|
||||||
# compress the access log days before
|
|
||||||
# default value is 1
|
|
||||||
# since V6.04
|
|
||||||
compress_access_log_days_before = 7
|
|
||||||
|
|
||||||
# if rotate the error log every day
|
|
||||||
# default value is false
|
|
||||||
# since V4.02
|
|
||||||
rotate_error_log = false
|
|
||||||
|
|
||||||
# rotate error log time base, time format: Hour:Minute
|
|
||||||
# Hour from 0 to 23, Minute from 0 to 59
|
|
||||||
# default value is 00:00
|
|
||||||
# since V4.02
|
|
||||||
error_log_rotate_time = 00:00
|
|
||||||
|
|
||||||
# if compress the old error log by gzip
|
|
||||||
# default value is false
|
|
||||||
# since V6.04
|
|
||||||
compress_old_error_log = false
|
|
||||||
|
|
||||||
# compress the error log days before
|
|
||||||
# default value is 1
|
|
||||||
# since V6.04
|
|
||||||
compress_error_log_days_before = 7
|
|
||||||
|
|
||||||
# rotate access log when the log file exceeds this size
|
|
||||||
# 0 means never rotates log file by log file size
|
|
||||||
# default value is 0
|
|
||||||
# since V4.02
|
|
||||||
rotate_access_log_size = 0
|
|
||||||
|
|
||||||
# rotate error log when the log file exceeds this size
|
|
||||||
# 0 means never rotates log file by log file size
|
|
||||||
# default value is 0
|
|
||||||
# since V4.02
|
|
||||||
rotate_error_log_size = 0
|
|
||||||
|
|
||||||
# keep days of the log files
|
|
||||||
# 0 means do not delete old log files
|
|
||||||
# default value is 0
|
|
||||||
log_file_keep_days = 0
|
|
||||||
|
|
||||||
# if skip the invalid record when sync file
|
|
||||||
# default value is false
|
|
||||||
# since V4.02
|
|
||||||
file_sync_skip_invalid_record = false
|
|
||||||
|
|
||||||
# if use connection pool
|
|
||||||
# default value is false
|
|
||||||
# since V4.05
|
|
||||||
use_connection_pool = true
|
|
||||||
|
|
||||||
# connections whose the idle time exceeds this time will be closed
|
|
||||||
# unit: second
|
|
||||||
# default value is 3600
|
|
||||||
# since V4.05
|
|
||||||
connection_pool_max_idle_time = 3600
|
|
||||||
|
|
||||||
# if compress the binlog files by gzip
|
|
||||||
# default value is false
|
|
||||||
# since V6.01
|
|
||||||
compress_binlog = true
|
|
||||||
|
|
||||||
# try to compress binlog time, time format: Hour:Minute
|
|
||||||
# Hour from 0 to 23, Minute from 0 to 59
|
|
||||||
# default value is 01:30
|
|
||||||
# since V6.01
|
|
||||||
compress_binlog_time = 01:30
|
|
||||||
|
|
||||||
# if check the mark of store path to prevent confusion
|
|
||||||
# recommend to set this parameter to true
|
|
||||||
# if two storage servers (instances) MUST use a same store path for
|
|
||||||
# some specific purposes, you should set this parameter to false
|
|
||||||
# default value is true
|
|
||||||
# since V6.03
|
|
||||||
check_store_path_mark = true
|
|
||||||
|
|
||||||
# use the ip address of this storage server if domain_name is empty,
|
|
||||||
# else this domain name will ocur in the url redirected by the tracker server
|
|
||||||
http.domain_name =
|
|
||||||
|
|
||||||
# the port of the web server on this storage server
|
|
||||||
http.server_port = 8888
|
|
||||||
|
|
||||||
|
|
@ -1,16 +0,0 @@
|
||||||
# <id> <group_name> <ip_or_hostname[:port]>
|
|
||||||
#
|
|
||||||
# id is a natural number (1, 2, 3 etc.),
|
|
||||||
# 6 bits of the id length is enough, such as 100001
|
|
||||||
#
|
|
||||||
# storage ip or hostname can be dual IPs seperated by comma,
|
|
||||||
# one is an inner (intranet) IP and another is an outer (extranet) IP,
|
|
||||||
# or two different types of inner (intranet) IPs
|
|
||||||
# for example: 192.168.2.100,122.244.141.46
|
|
||||||
# another eg.: 192.168.1.10,172.17.4.21
|
|
||||||
#
|
|
||||||
# the port is optional. if you run more than one storaged instances
|
|
||||||
# in a server, you must specified the port to distinguish different instances.
|
|
||||||
|
|
||||||
#100001 group1 192.168.0.196
|
|
||||||
#100002 group1 192.168.0.197
|
|
||||||
|
|
@ -1,329 +0,0 @@
|
||||||
# is this config file disabled
|
|
||||||
# false for enabled
|
|
||||||
# true for disabled
|
|
||||||
disabled = false
|
|
||||||
|
|
||||||
# bind an address of this host
|
|
||||||
# empty for bind all addresses of this host
|
|
||||||
bind_addr =
|
|
||||||
|
|
||||||
# the tracker server port
|
|
||||||
port = 22122
|
|
||||||
|
|
||||||
# connect timeout in seconds
|
|
||||||
# default value is 30
|
|
||||||
# Note: in the intranet network (LAN), 2 seconds is enough.
|
|
||||||
connect_timeout = 5
|
|
||||||
|
|
||||||
# network timeout in seconds for send and recv
|
|
||||||
# default value is 30
|
|
||||||
network_timeout = 60
|
|
||||||
|
|
||||||
# the base path to store data and log files
|
|
||||||
base_path = /data/fastdfs_data
|
|
||||||
|
|
||||||
# max concurrent connections this server support
|
|
||||||
# you should set this parameter larger, eg. 10240
|
|
||||||
# default value is 256
|
|
||||||
max_connections = 1024
|
|
||||||
|
|
||||||
# accept thread count
|
|
||||||
# default value is 1 which is recommended
|
|
||||||
# since V4.07
|
|
||||||
accept_threads = 1
|
|
||||||
|
|
||||||
# work thread count
|
|
||||||
# work threads to deal network io
|
|
||||||
# default value is 4
|
|
||||||
# since V2.00
|
|
||||||
work_threads = 4
|
|
||||||
|
|
||||||
# the min network buff size
|
|
||||||
# default value 8KB
|
|
||||||
min_buff_size = 8KB
|
|
||||||
|
|
||||||
# the max network buff size
|
|
||||||
# default value 128KB
|
|
||||||
max_buff_size = 128KB
|
|
||||||
|
|
||||||
# the method for selecting group to upload files
|
|
||||||
# 0: round robin
|
|
||||||
# 1: specify group
|
|
||||||
# 2: load balance, select the max free space group to upload file
|
|
||||||
store_lookup = 2
|
|
||||||
|
|
||||||
# which group to upload file
|
|
||||||
# when store_lookup set to 1, must set store_group to the group name
|
|
||||||
store_group = group2
|
|
||||||
|
|
||||||
# which storage server to upload file
|
|
||||||
# 0: round robin (default)
|
|
||||||
# 1: the first server order by ip address
|
|
||||||
# 2: the first server order by priority (the minimal)
|
|
||||||
# Note: if use_trunk_file set to true, must set store_server to 1 or 2
|
|
||||||
store_server = 0
|
|
||||||
|
|
||||||
# which path (means disk or mount point) of the storage server to upload file
|
|
||||||
# 0: round robin
|
|
||||||
# 2: load balance, select the max free space path to upload file
|
|
||||||
store_path = 0
|
|
||||||
|
|
||||||
# which storage server to download file
|
|
||||||
# 0: round robin (default)
|
|
||||||
# 1: the source storage server which the current file uploaded to
|
|
||||||
download_server = 0
|
|
||||||
|
|
||||||
# reserved storage space for system or other applications.
|
|
||||||
# if the free (available) space of any storage server in
|
|
||||||
# a group <= reserved_storage_space, no file can be uploaded to this group.
|
|
||||||
# bytes unit can be one of follows:
|
|
||||||
### G or g for gigabyte(GB)
|
|
||||||
### M or m for megabyte(MB)
|
|
||||||
### K or k for kilobyte(KB)
|
|
||||||
### no unit for byte(B)
|
|
||||||
### XX.XX% as ratio such as: reserved_storage_space = 10%
|
|
||||||
reserved_storage_space = 10%
|
|
||||||
|
|
||||||
#standard log level as syslog, case insensitive, value list:
|
|
||||||
### emerg for emergency
|
|
||||||
### alert
|
|
||||||
### crit for critical
|
|
||||||
### error
|
|
||||||
### warn for warning
|
|
||||||
### notice
|
|
||||||
### info
|
|
||||||
### debug
|
|
||||||
log_level = info
|
|
||||||
|
|
||||||
#unix group name to run this program,
|
|
||||||
#not set (empty) means run by the group of current user
|
|
||||||
run_by_group=
|
|
||||||
|
|
||||||
#unix username to run this program,
|
|
||||||
#not set (empty) means run by current user
|
|
||||||
run_by_user =
|
|
||||||
|
|
||||||
# allow_hosts can ocur more than once, host can be hostname or ip address,
|
|
||||||
# "*" (only one asterisk) means match all ip addresses
|
|
||||||
# we can use CIDR ips like 192.168.5.64/26
|
|
||||||
# and also use range like these: 10.0.1.[0-254] and host[01-08,20-25].domain.com
|
|
||||||
# for example:
|
|
||||||
# allow_hosts=10.0.1.[1-15,20]
|
|
||||||
# allow_hosts=host[01-08,20-25].domain.com
|
|
||||||
# allow_hosts=192.168.5.64/26
|
|
||||||
allow_hosts = *
|
|
||||||
|
|
||||||
# sync log buff to disk every interval seconds
|
|
||||||
# default value is 10 seconds
|
|
||||||
sync_log_buff_interval = 1
|
|
||||||
|
|
||||||
# check storage server alive interval seconds
|
|
||||||
check_active_interval = 120
|
|
||||||
|
|
||||||
# thread stack size, should >= 64KB
|
|
||||||
# default value is 256KB
|
|
||||||
thread_stack_size = 256KB
|
|
||||||
|
|
||||||
# auto adjust when the ip address of the storage server changed
|
|
||||||
# default value is true
|
|
||||||
storage_ip_changed_auto_adjust = true
|
|
||||||
|
|
||||||
# storage sync file max delay seconds
|
|
||||||
# default value is 86400 seconds (one day)
|
|
||||||
# since V2.00
|
|
||||||
storage_sync_file_max_delay = 86400
|
|
||||||
|
|
||||||
# the max time of storage sync a file
|
|
||||||
# default value is 300 seconds
|
|
||||||
# since V2.00
|
|
||||||
storage_sync_file_max_time = 300
|
|
||||||
|
|
||||||
# if use a trunk file to store several small files
|
|
||||||
# default value is false
|
|
||||||
# since V3.00
|
|
||||||
use_trunk_file = false
|
|
||||||
|
|
||||||
# the min slot size, should <= 4KB
|
|
||||||
# default value is 256 bytes
|
|
||||||
# since V3.00
|
|
||||||
slot_min_size = 256
|
|
||||||
|
|
||||||
# the max slot size, should > slot_min_size
|
|
||||||
# store the upload file to trunk file when it's size <= this value
|
|
||||||
# default value is 16MB
|
|
||||||
# since V3.00
|
|
||||||
slot_max_size = 1MB
|
|
||||||
|
|
||||||
# the alignment size to allocate the trunk space
|
|
||||||
# default value is 0 (never align)
|
|
||||||
# since V6.05
|
|
||||||
# NOTE: the larger the alignment size, the less likely of disk
|
|
||||||
# fragmentation, but the more space is wasted.
|
|
||||||
trunk_alloc_alignment_size = 256
|
|
||||||
|
|
||||||
# if merge contiguous free spaces of trunk file
|
|
||||||
# default value is false
|
|
||||||
# since V6.05
|
|
||||||
trunk_free_space_merge = true
|
|
||||||
|
|
||||||
# if delete / reclaim the unused trunk files
|
|
||||||
# default value is false
|
|
||||||
# since V6.05
|
|
||||||
delete_unused_trunk_files = false
|
|
||||||
|
|
||||||
# the trunk file size, should >= 4MB
|
|
||||||
# default value is 64MB
|
|
||||||
# since V3.00
|
|
||||||
trunk_file_size = 64MB
|
|
||||||
|
|
||||||
# if create trunk file in advance
|
|
||||||
# default value is false
|
|
||||||
# since V3.06
|
|
||||||
trunk_create_file_advance = false
|
|
||||||
|
|
||||||
# the time base to create trunk file
|
|
||||||
# the time format: HH:MM
|
|
||||||
# default value is 02:00
|
|
||||||
# since V3.06
|
|
||||||
trunk_create_file_time_base = 02:00
|
|
||||||
|
|
||||||
# the interval of create trunk file, unit: second
|
|
||||||
# default value is 86400 (one day)
|
|
||||||
# since V3.06
|
|
||||||
trunk_create_file_interval = 86400
|
|
||||||
|
|
||||||
# the threshold to create trunk file
|
|
||||||
# when the free trunk file size less than the threshold,
|
|
||||||
# will create the trunk files
|
|
||||||
# default value is 0
|
|
||||||
# since V3.06
|
|
||||||
trunk_create_file_space_threshold = 20G
|
|
||||||
|
|
||||||
# if check trunk space occupying when loading trunk free spaces
|
|
||||||
# the occupied spaces will be ignored
|
|
||||||
# default value is false
|
|
||||||
# since V3.09
|
|
||||||
# NOTICE: set this parameter to true will slow the loading of trunk spaces
|
|
||||||
# when startup. you should set this parameter to true when neccessary.
|
|
||||||
trunk_init_check_occupying = false
|
|
||||||
|
|
||||||
# if ignore storage_trunk.dat, reload from trunk binlog
|
|
||||||
# default value is false
|
|
||||||
# since V3.10
|
|
||||||
# set to true once for version upgrade when your version less than V3.10
|
|
||||||
trunk_init_reload_from_binlog = false
|
|
||||||
|
|
||||||
# the min interval for compressing the trunk binlog file
|
|
||||||
# unit: second, 0 means never compress
|
|
||||||
# FastDFS compress the trunk binlog when trunk init and trunk destroy
|
|
||||||
# recommand to set this parameter to 86400 (one day)
|
|
||||||
# default value is 0
|
|
||||||
# since V5.01
|
|
||||||
trunk_compress_binlog_min_interval = 86400
|
|
||||||
|
|
||||||
# the interval for compressing the trunk binlog file
|
|
||||||
# unit: second, 0 means never compress
|
|
||||||
# recommand to set this parameter to 86400 (one day)
|
|
||||||
# default value is 0
|
|
||||||
# since V6.05
|
|
||||||
trunk_compress_binlog_interval = 86400
|
|
||||||
|
|
||||||
# compress the trunk binlog time base, time format: Hour:Minute
|
|
||||||
# Hour from 0 to 23, Minute from 0 to 59
|
|
||||||
# default value is 03:00
|
|
||||||
# since V6.05
|
|
||||||
trunk_compress_binlog_time_base = 03:00
|
|
||||||
|
|
||||||
# max backups for the trunk binlog file
|
|
||||||
# default value is 0 (never backup)
|
|
||||||
# since V6.05
|
|
||||||
trunk_binlog_max_backups = 7
|
|
||||||
|
|
||||||
# if use storage server ID instead of IP address
|
|
||||||
# if you want to use dual IPs for storage server, you MUST set
|
|
||||||
# this parameter to true, and configure the dual IPs in the file
|
|
||||||
# configured by following item "storage_ids_filename", such as storage_ids.conf
|
|
||||||
# default value is false
|
|
||||||
# since V4.00
|
|
||||||
use_storage_id = false
|
|
||||||
|
|
||||||
# specify storage ids filename, can use relative or absolute path
|
|
||||||
# this parameter is valid only when use_storage_id set to true
|
|
||||||
# since V4.00
|
|
||||||
storage_ids_filename = storage_ids.conf
|
|
||||||
|
|
||||||
# id type of the storage server in the filename, values are:
|
|
||||||
## ip: the ip address of the storage server
|
|
||||||
## id: the server id of the storage server
|
|
||||||
# this paramter is valid only when use_storage_id set to true
|
|
||||||
# default value is ip
|
|
||||||
# since V4.03
|
|
||||||
id_type_in_filename = id
|
|
||||||
|
|
||||||
# if store slave file use symbol link
|
|
||||||
# default value is false
|
|
||||||
# since V4.01
|
|
||||||
store_slave_file_use_link = false
|
|
||||||
|
|
||||||
# if rotate the error log every day
|
|
||||||
# default value is false
|
|
||||||
# since V4.02
|
|
||||||
rotate_error_log = false
|
|
||||||
|
|
||||||
# rotate error log time base, time format: Hour:Minute
|
|
||||||
# Hour from 0 to 23, Minute from 0 to 59
|
|
||||||
# default value is 00:00
|
|
||||||
# since V4.02
|
|
||||||
error_log_rotate_time = 00:00
|
|
||||||
|
|
||||||
# if compress the old error log by gzip
|
|
||||||
# default value is false
|
|
||||||
# since V6.04
|
|
||||||
compress_old_error_log = false
|
|
||||||
|
|
||||||
# compress the error log days before
|
|
||||||
# default value is 1
|
|
||||||
# since V6.04
|
|
||||||
compress_error_log_days_before = 7
|
|
||||||
|
|
||||||
# rotate error log when the log file exceeds this size
|
|
||||||
# 0 means never rotates log file by log file size
|
|
||||||
# default value is 0
|
|
||||||
# since V4.02
|
|
||||||
rotate_error_log_size = 0
|
|
||||||
|
|
||||||
# keep days of the log files
|
|
||||||
# 0 means do not delete old log files
|
|
||||||
# default value is 0
|
|
||||||
log_file_keep_days = 0
|
|
||||||
|
|
||||||
# if use connection pool
|
|
||||||
# default value is false
|
|
||||||
# since V4.05
|
|
||||||
use_connection_pool = true
|
|
||||||
|
|
||||||
# connections whose the idle time exceeds this time will be closed
|
|
||||||
# unit: second
|
|
||||||
# default value is 3600
|
|
||||||
# since V4.05
|
|
||||||
connection_pool_max_idle_time = 3600
|
|
||||||
|
|
||||||
# HTTP port on this tracker server
|
|
||||||
http.server_port = 8080
|
|
||||||
|
|
||||||
# check storage HTTP server alive interval seconds
|
|
||||||
# <= 0 for never check
|
|
||||||
# default value is 30
|
|
||||||
http.check_alive_interval = 30
|
|
||||||
|
|
||||||
# check storage HTTP server alive type, values are:
|
|
||||||
# tcp : connect to the storage server with HTTP port only,
|
|
||||||
# do not request and get response
|
|
||||||
# http: storage check alive url must return http status 200
|
|
||||||
# default value is tcp
|
|
||||||
http.check_alive_type = tcp
|
|
||||||
|
|
||||||
# check storage HTTP server alive uri/url
|
|
||||||
# NOTE: storage embed HTTP server support uri: /status.html
|
|
||||||
http.check_alive_uri = /status.html
|
|
||||||
|
|
||||||
|
|
@ -1,37 +0,0 @@
|
||||||
#http server
|
|
||||||
#
|
|
||||||
|
|
||||||
server {
|
|
||||||
listen 9088;
|
|
||||||
server_name localhost;
|
|
||||||
|
|
||||||
#avoid "open() /usr/local/nginx/html/favicon.ico failed (2: No such file or directory)" noise by turning favicon logging off
|
|
||||||
location = /favicon.ico {
|
|
||||||
log_not_found off;
|
|
||||||
access_log off;
|
|
||||||
}
|
|
||||||
|
|
||||||
#proxy HTTP file access requests to the fastdfs module, without logging the requests
|
|
||||||
location ~/group[0-9]/ {
|
|
||||||
ngx_fastdfs_module;
|
|
||||||
|
|
||||||
log_not_found off;
|
|
||||||
access_log off;
|
|
||||||
}
|
|
||||||
|
|
||||||
#if a group has only one storage server, files can be served directly from the local disk
|
|
||||||
# location ~ /group1/M00 {
|
|
||||||
# alias /data/fastdfs/upload/path0;
|
|
||||||
# ngx_fastdfs_module;
|
|
||||||
# }
|
|
||||||
|
|
||||||
# location ~ /group1/M01 {
|
|
||||||
# alias /data/fastdfs/upload/path1;
|
|
||||||
# ngx_fastdfs_module;
|
|
||||||
# }
|
|
||||||
|
|
||||||
error_page 500 502 503 504 /50x.html;
|
|
||||||
location = /50x.html {
|
|
||||||
root html;
|
|
||||||
}
|
|
||||||
}
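
A quick way to exercise this server block together with mod_fastdfs.conf, assuming a file id like the one used later in this document:

```bash
# the group name must appear in the URI because url_have_group_name = true in mod_fastdfs.conf
curl -I "http://127.0.0.1:9088/group1/M00/00/00/oYYBAGMi4zGAYNoxABY-esN9nNw502.jpg"
# HTTP/1.1 200 OK  -> the module serves the file locally or proxies to the source storage
# 400 / 404        -> usually a mismatch between mod_fastdfs.conf and storage.conf (group_name, store_path*)
```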
|
|
||||||
|
|
@ -1,33 +0,0 @@
|
||||||
worker_processes 1;
|
|
||||||
worker_rlimit_nofile 65535; #be sure to raise the server's max open files limit first.
|
|
||||||
|
|
||||||
error_log /data/fastdfs_data/logs/nginx-error.log;
|
|
||||||
|
|
||||||
events {
|
|
||||||
use epoll; #on Linux 2.6+ you should use epoll.
|
|
||||||
worker_connections 65535;
|
|
||||||
}
|
|
||||||
|
|
||||||
http {
|
|
||||||
include mime.types;
|
|
||||||
default_type application/octet-stream;
|
|
||||||
|
|
||||||
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
|
|
||||||
'$status $body_bytes_sent "$http_referer" '
|
|
||||||
'"$http_user_agent" "$http_x_forwarded_for"';
|
|
||||||
|
|
||||||
access_log /data/fastdfs_data/logs/nginx-access.log main;
|
|
||||||
sendfile on;
|
|
||||||
keepalive_timeout 65;
|
|
||||||
|
|
||||||
gzip on;
|
|
||||||
gzip_min_length 2k;
|
|
||||||
gzip_buffers 8 32k;
|
|
||||||
gzip_http_version 1.1;
|
|
||||||
gzip_comp_level 2;
|
|
||||||
gzip_types text/plain text/css text/javascript application/json application/javascript application/x-javascript application/xml;
|
|
||||||
gzip_vary on;
|
|
||||||
|
|
||||||
include /usr/local/nginx/conf.d/*.conf;
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
@ -1,135 +0,0 @@
|
||||||
#!/bin/sh
|
|
||||||
#
|
|
||||||
# purpose: set the tracker \ storage configuration file parameters, liyanjing 2022.08.10
|
|
||||||
#
|
|
||||||
|
|
||||||
|
|
||||||
# 1. main tracker parameters; in production it is recommended to change the port
|
|
||||||
tracker_port=22122
|
|
||||||
# two trackers are enough for mutual backup
|
|
||||||
tracker_server="tracker_server = 172.16.100.90:$tracker_port\ntracker_server = 172.16.100.91:$tracker_port"
|
|
||||||
|
|
||||||
# format: <id> <group_name> <ip_or_hostname>
|
|
||||||
storage_ids="
|
|
||||||
100001 group1 172.16.100.90
|
|
||||||
100002 group2 172.16.100.91
|
|
||||||
"
|
|
||||||
|
|
||||||
# restrict the IPs allowed to access the tracker so that not everyone can upload files; the default is allow_hosts = *
|
|
||||||
allow_hosts="allow_hosts = 172.16.100.[85-91,83]\n"
|
|
||||||
|
|
||||||
# 2. main parameters of the local storage; in production it is recommended to change the port
|
|
||||||
storage_group_name="group1"
|
|
||||||
storage_server_port=23000
|
|
||||||
store_path_count=1 #number of file storage directories; by convention they are /data/fastdfs/upload/path0~n
|
|
||||||
|
|
||||||
#================== function bodies below ================================
|
|
||||||
function tracker_confset() {
|
|
||||||
|
|
||||||
sed -i "s|^port =.*$|port = $tracker_port|g" ./conf/tracker.conf
|
|
||||||
# use storage ID instead of IP address
|
|
||||||
sed -i "s|^use_storage_id =.*$|use_storage_id = true|g" ./conf/tracker.conf
|
|
||||||
|
|
||||||
cat > ./conf/storage_ids.conf << EOF
|
|
||||||
# <id> <group_name> <ip_or_hostname[:port]>
|
|
||||||
#
|
|
||||||
# id is a natural number (1, 2, 3 etc.),
|
|
||||||
# 6 bits of the id length is enough, such as 100001
|
|
||||||
#
|
|
||||||
# storage ip or hostname can be dual IPs seperated by comma,
|
|
||||||
# one is an inner (intranet) IP and another is an outer (extranet) IP,
|
|
||||||
# or two different types of inner (intranet) IPs
|
|
||||||
# for example: 192.168.2.100,122.244.141.46
|
|
||||||
# another eg.: 192.168.1.10,172.17.4.21
|
|
||||||
#
|
|
||||||
# the port is optional. if you run more than one storaged instances
|
|
||||||
# in a server, you must specified the port to distinguish different instances.
|
|
||||||
|
|
||||||
#100001 group1 192.168.0.196
|
|
||||||
#100002 group1 192.168.0.197
|
|
||||||
$storage_ids
|
|
||||||
|
|
||||||
EOF
|
|
||||||
|
|
||||||
# restrict the IPs allowed to access the tracker so that not everyone can upload files; the default is allow_hosts = *
|
|
||||||
#sed -i '/^allow_hosts/{N;/^allow_hosts/s/.*/'"${allow_hosts}"'/}' ./conf/tracker.conf
|
|
||||||
}
|
|
||||||
|
|
||||||
function storage_confset() {
|
|
||||||
|
|
||||||
#replace parameters in storage.conf
|
|
||||||
sed -i "s|^port =.*$|port = $storage_server_port|g" ./conf/storage.conf
|
|
||||||
sed -i "s|^group_name =.*$|group_name = $storage_group_name|g" ./conf/storage.conf
|
|
||||||
|
|
||||||
sed -i "s|^store_path_count =.*$|store_path_count = $store_path_count|g" ./conf/storage.conf
|
|
||||||
arr_store_path="store_path0 = /data/fastdfs/upload/path0"
|
|
||||||
for((i=1;i<$store_path_count;i++));
|
|
||||||
do
|
|
||||||
arr_store_path="$arr_store_path \nstore_path$i = /data/fastdfs/upload/path$i"
|
|
||||||
done
|
|
||||||
|
|
||||||
sed -i '/^store_path[1-9] =.*$/d' ./conf/storage.conf
|
|
||||||
sed -i '/^#store_path[0-9] =.*$/d' ./conf/storage.conf
|
|
||||||
sed -i "s|^store_path0 =.*$|$arr_store_path|g" ./conf/storage.conf
|
|
||||||
|
|
||||||
sed -i "/^tracker_server =/{N;/^tracker_server =/s/.*/$tracker_server/}" ./conf/storage.conf
|
|
||||||
|
|
||||||
#replace parameters in mod_fastdfs.conf
|
|
||||||
sed -i "/^tracker_server/{N;/^tracker_server/s/.*/$tracker_server/}" ./conf/mod_fastdfs.conf
|
|
||||||
sed -i "s|^storage_server_port=.*$|storage_server_port=$storage_server_port|g" ./conf/mod_fastdfs.conf
|
|
||||||
sed -i "s|^group_name=.*$|group_name=$storage_group_name|g" ./conf/mod_fastdfs.conf
|
|
||||||
sed -i "s|^url_have_group_name =.*$|url_have_group_name = true|g" ./conf/mod_fastdfs.conf
|
|
||||||
sed -i "s|^store_path_count=.*$|store_path_count=$store_path_count|g" ./conf/mod_fastdfs.conf
|
|
||||||
sed -i '/^store_path[1-9].*/d' ./conf/mod_fastdfs.conf
|
|
||||||
sed -i "s|^store_path0.*|$arr_store_path|g" ./conf/mod_fastdfs.conf
|
|
||||||
sed -i "s|^use_storage_id =.*$|use_storage_id = true|g" ./conf/mod_fastdfs.conf
|
|
||||||
|
|
||||||
#client.conf: change this file when the client tools bundled with fastdfs are needed
|
|
||||||
sed -i "/^tracker_server/{N;/^tracker_server/s/.*/$tracker_server/}" ./conf/client.conf
|
|
||||||
sed -i "s|^use_storage_id =.*$|use_storage_id = true|g" ./conf/client.conf
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
mode_number=1
|
|
||||||
function chose_info_print() {
|
|
||||||
echo -e "\033[32m 请先设置好本脚本的tracker \ storage 的参数变量,然后再选择:
|
|
||||||
[1] 配置 tracker
|
|
||||||
|
|
||||||
[2] 配置 storage\033[0m"
|
|
||||||
}
|
|
||||||
|
|
||||||
#run
function run() {

#1. print the menu choices
chose_info_print
|
|
||||||
|
|
||||||
read -p "please input number 1 to 2: " mode_number
|
|
||||||
if [[ ! $mode_number =~ ^[1-2]$ ]]; then
|
|
||||||
echo -e "\033[31merror! the number you input isn't 1 to 2\n\033[0m"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
#2. execute
case ${mode_number} in
1)
#echo "configuring tracker"
|
|
||||||
tracker_confset
|
|
||||||
;;
|
|
||||||
2)
|
|
||||||
#echo "设置storage"
|
|
||||||
storage_confset
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo -e "\033[31merror! the number you input isn't 1 to 2\n\033[0m"
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
echo -e "\033[36m ${input_parameter} 配置文件设置完毕,建议人工复核一下\033[0m"
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
run
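
A hypothetical session with the script above (referred to as conf_setting.sh in the deployment notes), run from the directory that holds the ./conf templates, which is the layout the sed commands assume:

```bash
cd /opt/fastdfs-deploy            # assumed: contains ./conf/tracker.conf, storage.conf, mod_fastdfs.conf, client.conf
sh conf_setting.sh                # prints the colored menu
# please input number 1 to 2: 2   # rewrites storage.conf / mod_fastdfs.conf / client.conf in place
egrep -v "^#|^$" ./conf/storage.conf | head    # review the values the script wrote
```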
|
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -1,164 +0,0 @@
|
||||||
### Building the image

References:
https://github.com/qbanxiaoli/fastdfs/blob/master/Dockerfile
https://github.com/ygqygq2/fastdfs-nginx/blob/master/Dockerfile

# docker build -t lyj/fastdfs:6.09-alpine .
# docker save a3f007114480 -o /data/docker_images/lyj-fastdfs-6.09-alpine.tar
# docker load -i /data/docker_images/lyj-fastdfs-6.09-alpine.tar && docker tag a3f007114480 lyj/fastdfs:6.09-alpine

# docker build -t lyj/fastdfs:6.08-alpine .
# docker save 646a2c0265ca -o /data/docker_images/lyj-fastdfs-6.08-alpine.tar
# docker load -i /data/docker_images/lyj-fastdfs-6.08-alpine.tar && docker tag 646a2c0265ca lyj/fastdfs:6.08-alpine

Note: a centos base image can also be used.

### 1. tracker deployment

Two trackers are enough for mutual backup; remember to change the port in production.

>[danger]It is recommended to use the conf_setting.sh script to set the configuration file parameters: open the script and change the main tracker \ storage parameters

```bash
1. Create the mount directories on the host
# mkdir -p /data/docker/fastdfs/tracker/{conf,data}    #conf holds the tracker config files, data holds the tracker's base data

2. tracker configuration files
+ I mount the whole config directory, so upload all config files under 部署操作说明书\fastdfs-conf\conf to the server; the tracker only uses tracker.conf and storage_ids.conf, the other files can be ignored.

+ Use an ID instead of the IP address as the storage server identifier. This is strongly recommended, e.g. it makes later migration easy. Change use_storage_id = false to true and fill in the id, group name and ip of every storage in storage_ids.conf

+ For safety you can limit the IP range allowed to connect to this tracker; the default is allow_hosts = *

+ reserved_storage_space is the space the storage keeps for the system and other applications. It accepts an absolute value (10 G or 10240 M) or a percentage (supported since V4); forum users say "do not set the minimum threshold to 2% (there are pitfalls), 5% works".
## as soon as one server in the group hits this limit, no more files can be uploaded to the group
## no unit for byte(B): without a unit the value is in bytes, e.g. 2G = 2147483648 bytes, reserved_storage_space = 2147483648
## tested on 6.08: a percentage works; an absolute value without a unit (bytes) works; an absolute value with a unit fails (fixed in v6.0.9): ERROR - file: shared_func.c, line: 2449, unkown byte unit: MB, input string: 10240 MB
## when the reserved space is an absolute value, there must be no space between the number and the unit.

...read the parameter descriptions and tune the other parameters

3. Start the tracker container
# docker run -d --net=host --restart=always --name=tracker \
-v /etc/localtime:/etc/localtime \
-v /data/docker/fastdfs/tracker/data:/data/fastdfs_data \
-v /data/docker/fastdfs/tracker/conf:/etc/fdfs \
-d lyj/fastdfs:6.09-alpine tracker


docker run -d --net=host --restart=always --name=ttmp \
-d lyj/fastdfs:6.09-alpine tracker

4. Open the tracker port in the firewall (22122 by default); change the port in production
# firewall-cmd --zone=public --add-port=22122/tcp --permanent
# firewall-cmd --reload

```
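
A quick check that the tracker from step 3 actually came up; a sketch assuming the container name and port used above and that `ss` is available on the host:

```bash
docker logs --tail 20 tracker    # the startup log should report the base path and listening port
ss -lntp | grep 22122            # the tracker should be listening on 22122
```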

>Strip comments and blank lines: egrep -v "#|^$" /data/docker/fastdfs/tracker/conf/fdfs/tracker.conf >/data/docker/fastdfs/tracker/conf/fdfs/tracker.conf.bak

### 2. storage deployment

+ fastdfs convention: `storage servers in the same group must use the same port; it is recommended to mount the same number of store directories with the same paths`

+ For mutual backup, two storage servers per group are enough

+ The fastdfs image already bundles nginx with the fastdfs module as the web server that provides HTTP file access

```bash
1. Create the mount directories on the host
# mkdir -p /data/docker/fastdfs/storage/{conf,metadata,nginx_conf,nginx_conf.d}    #conf holds the storage config files, metadata holds the storage's base data
# mkdir -p /data/docker/fastdfs/upload/{path0,path1,path2,path3}    #holds the uploaded files; with several disks, mount each disk on one of /data/fastdfs/upload/path0~n
```

>[danger]It is recommended to use conf_setting.sh to set the configuration file parameters

```bash
2. I mount the whole config directory, so upload all config files under fastdfs-conf\ to the server
Files used: storage.conf (storage config), mod_fastdfs.conf (config of the HTTP access module), nginx_conf/nginx.conf and nginx_conf.d/default.conf (nginx configs)

Main parameters to adjust:
1. storage.conf
group_name = group1  # group this storage belongs to
base_path = /data/fastdfs_data  # directory for the storage's base data and logs
store_path_count = 1  # number of directories holding uploaded files
store_path0 = /data/fastdfs/upload/path0  # configure store_path_count paths one by one, index starts at 0
tracker_server = 172.16.100.90:22122  # tracker_server list; with several trackers, one line per tracker server
allow_hosts = *  ## IPs allowed to connect to this storage server; can be restricted for safety
2. mod_fastdfs.conf
tracker_server = 172.16.100.90:22122  # tracker IP and port; with several trackers, one line per tracker server
storage_server_port = 23000  # port of the local storage; fastdfs requires "the same port within a group"
group_name = group1  # group name of the local storage
url_have_group_name = true  # whether the url contains the group name, default false; must be set correctly, otherwise files cannot be downloaded
store_path_count = 1  # number of local store paths, must match storage.conf
store_path0 = /data/fastdfs/upload/path0  #local store path, must match storage.conf

## if this storage server serves multiple groups, set as many groups as there are; 0 for a single group.
## usually one machine runs one storage; there is no need to run storages of several groups, because a storage already supports multiple store paths
## with docker one container runs one storage, so this can be left alone
group_count = 0
#[group1]
#group_name=group1
#storage_server_port=23000
#store_path_count=1
#store_path0=/data/fastdfs/upload/path0
```
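
storage.conf and mod_fastdfs.conf have to agree on these values, otherwise downloads through nginx fail. A small check on the host, assuming the mounted conf directory created in step 1:

```bash
cd /data/docker/fastdfs/storage/conf
grep -E "^(group_name|store_path_count|store_path0)" storage.conf mod_fastdfs.conf
grep -E "^port" storage.conf; grep -E "^storage_server_port" mod_fastdfs.conf
# group_name, store_path_count and store_path0 must match between the two files,
# and "port" in storage.conf must equal "storage_server_port" in mod_fastdfs.conf
```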
```bash
3. http.conf: change this file when the anti-steal token needs to be enabled
4. mime.types: media types; add an entry when a file extension cannot be found in this file.
5. client.conf: change this file when the client tools bundled with fastdfs are needed
tracker_server = 172.16.100.90:22122
6. nginx_conf.d/default.conf
# proxy HTTP file access requests to the fastdfs module
location ~/group[0-9]/ {
ngx_fastdfs_module;
}

# location ~ /group1/M00 {
# alias /data/fastdfs/upload/path0;
# ngx_fastdfs_module;
# }


```
```bash
3. Start the storage container
# docker run -d --net=host --restart always --name=storage1_1 \
--privileged=true \
-v /etc/localtime:/etc/localtime \
-v /data/docker/fastdfs/storage/metadata:/data/fastdfs_data \
-v /data/docker/fastdfs/storage/conf:/etc/fdfs \
-v /data/docker/fastdfs/upload:/data/fastdfs/upload \
-v /data/docker/fastdfs/storage/nginx_conf/nginx.conf:/usr/local/nginx/conf/nginx.conf \
-v /data/docker/fastdfs/storage/nginx_conf.d:/usr/local/nginx/conf.d \
-d lyj/fastdfs:6.09-alpine storage

Open the storage port (23000 by default; change it in production) and the nginx port 9088 in the firewall
# firewall-cmd --zone=public --add-port=23000/tcp --permanent
# firewall-cmd --zone=public --add-port=9088/tcp --permanent
# firewall-cmd --reload

4. Check the cluster status
# docker exec -it storage1_1 sh
# /usr/bin/fdfs_monitor /etc/fdfs/storage.conf

File access: http://172.16.100.90:9088/group1/M00/00/00/oYYBAGMi4zGAYNoxABY-esN9nNw502.jpg
```
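
An end-to-end smoke test once both containers are running; the client.conf path and the fdfs_* tools below are the ones installed inside the image by make.sh install, so treat this as a sketch:

```bash
docker exec -it storage1_1 sh
echo "hello fastdfs" > /tmp/hello.txt
/usr/bin/fdfs_upload_file /etc/fdfs/client.conf /tmp/hello.txt
# prints a file id such as group1/M00/00/00/xxxxxx.txt; fetch it through nginx from the host:
# curl -I "http://172.16.100.90:9088/<file id printed above>"
```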

File upload demo: 《E:\gitplace\springboot-fastdfs》

5. Periodic nginx log rotation and cleanup
In production another proxy layer usually sits in front of the storage nginx; here I set access_log off; and do not log requests, it can be turned back on for debugging.

...

6. High availability of the HTTP file access entry point
nginx + keepalived
nginx reverse-proxy configuration for the storage files: 《E:\工具箱\08. docker\3.docker_container_install\nginx\(lvs-webpage-api-oss)default.conf》

7. Scaling out: adding a storage server
1. If storage_ids.conf is used, edit storage_ids.conf on every tracker and add a line for the new storage id; note that "the trackers must be restarted for it to take effect".
2. Deploy the storage as described above.

8. Use a shell script with the client tools to delete historical files
@ -1,141 +0,0 @@
|
||||||
A collection of fastdfs questions from the bbs forum: http://bbs.chinaunix.net/forum-240-1.html

##### Q0. How is the storage location chosen when uploading a file?

The tracker is the coordinator; how does it look up a storage address and return it to the client? Please read 《fastdfs\1、fastdf配置文件参数解释\tracker.conf参数说明.txt》

```bash
1. client uploads a file <-- with / without a specified group --> tracker { picks the group ---> picks which storage in the group ---> picks which store path on that storage server }

2. The client then talks to that storage directly and uploads the file to it.
```
>[danger]Note: the reserved_storage_space parameter in tracker.conf is the space the storage keeps for the system and other applications; when the free space is below it, uploads fail.


##### Q1. How to check the fastdfs version
/usr/bin/fdfs_monitor /etc/fdfs/storage.conf, or open the tracker's base data file storage_servers_new.dat

##### Q2. Relationships inside one cluster
- All trackers in a cluster are completely equal peers; every tracker receives the storage heartbeats and builds the metadata used to serve reads and writes.
> Since 2.03 the tracker servers do talk to each other; for example, when a new tracker server is added, it automatically fetches the system data files from the existing tracker servers.
- Storage servers in the same group are peers as well; there is no master/slave relationship.
- Groups are independent of each other; storage servers of different groups never communicate with each other.

---
##### Q3. Backup mechanism
FastDFS uses grouped storage: a group consists of one or more storage servers, all storages in a group hold the same files, and the servers in a group back each other up and balance the load.

---
##### Q4. Relation between a storage and its group
A storage belongs to exactly one group. The group name is configured on the storage server, which reports its group name, free space and other information to the tracker server.

---
##### Q5. How many trackers can a storage connect to
The tracker servers a storage connects to are configured on the storage server; one or more can be configured. When a storage is configured with two or more trackers, the tracker group also balances the load.

>Note: all storage server information is cached in memory on the tracker, so the number of groups supported is in theory limited only by the tracker server's memory; thousands or tens of thousands of groups are no problem at all.

>[danger]Reminder: within one cluster the correct practice is that "every storage should connect to all trackers". If an individual storage server is not bound to every tracker server, nothing breaks either.

---
##### Q6. How many storages should run on one machine
Usually a machine starts only one storage node (i.e. serves one group). Depending on the server (e.g. several mounted disks), or when files must be stored separately, several storages can run, but it is unnecessary because a storage supports multiple mounted store paths.

>[danger]Note: within a cluster, storages of the same group must use the same port, so a single machine can only run storages that belong to different groups.
---
##### Q7. How to use several disks in one machine
With several disks mounted on different directories, do not build a RAID; use each disk as its own mount point and mount the single disks directly.
- Run one storage for one group and configure several store_path entries (index starts at 0), one per disk directory. ---> recommended
- Or use several groups, i.e. run several storages, each managing one disk (mount point). ---> unnecessary, because one storage can already manage several disks

> Note: a storage server supports several paths (e.g. disks) for storing files; to improve IO performance, different disks can be mounted on different directories

---

##### Q8. What happens when storage servers in the same group have different disk sizes
The usable space of a group is determined by the smallest storage in the group, so the storages of one GROUP should be configured as identically as possible, i.e. **the same store_path_count and the same disk sizes for the file directories; the directory names may differ**.
> Forum thread: when the storages of one group are configured differently, a server may still have space yet no more files can be uploaded. http://bbs.chinaunix.net/forum.php?mod=viewthread&tid=1941456&extra=page%3D1%26filter%3Ddigest%26digest%3D1

---
##### Q9. Is there a limit on the number of files per directory?
No limit; whether an upload succeeds depends only on the remaining space.
> Note: by default the storage **puts 100 files into a directory and then rotates to the next one; after the last directory data/FF/FF it wraps back to the first**.
- subdir_count_per_path = 256: on first startup the storage server automatically creates N * N file subdirectories under store_path0~n\data\.
- With file_distribute_path_mode = 0 (round robin) and file_distribute_rotate_count = 100, once the number of files stored in a directory reaches this value, subsequent uploads go to the next directory.
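
The numbers behind the two parameters above, using the defaults quoted in this document:

```bash
echo $((256 * 256))   # 65536 data subdirectories are created under each store_path
# with file_distribute_path_mode = 0 and file_distribute_rotate_count = 100,
# uploads fill data/00/00 ... data/FF/FF in turn, 100 files per directory, then wrap around
```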

##### Q10. Do http.server_port in the tracker, storage and client config files still need to be set
Ignore this setting; use an external web server for HTTP file access.
>[danger] Notes:
- The built-in web server was removed from fastdfs in 4.0.5 (the bundled HTTP service was too simple to provide load balancing and other high-performance features); an external web server (apache or nginx) provides the HTTP file access instead.
- To deal with file sync delay, apache or nginx needs the extension module provided by FastDFS, e.g. fastdfs-nginx-module for nginx
- Deploy a web server on every storage server and serve HTTP directly from there
- No web server is needed on the tracker server

##### Q11. How to prevent hotlinking
Hotlink protection is implemented with tokens. Original thread: http://bbs.chinaunix.net/thread-1916999-1-1.html
Look at the config file mod_fastdfs.conf, which includes http.conf; the anti-steal settings are made in http.conf.

##### Q12. "Massive" small files degrade file system performance sharply; what scale does "massive" mean here
For performance reasons I would keep the number of files stored on a single machine below about 10 million.
> [Original thread](http://bbs.chinaunix.net/thread-2328826-1-48.html): the 3.0 plan mentions that "massive" small files degrade, or even crash, file system performance. What scale does "massive" mean here, and can scaling out hosts (not just disks) remove the bottleneck caused by "massive" small files?

##### Q13. Does the FastDFS nginx module (fastdfs-nginx-module) support resumable downloads

Resumable download support was added in version V1.08

##### Q14. With the nginx FDFS module configured, files can be accessed through nginx; what is the tracker_server setting in mod_fastdfs.conf for?

When the web server starts, the extension module connects to the tracker server to obtain 2 parameters,
and falls back to the defaults when it cannot connect or the fetch fails:
+ storage_sync_file_max_delay: the maximum file sync delay, default one day
+ storage_sync_file_max_time: the maximum time to sync a single file, default 5 minutes.

##### Q15. What should be watched out for with the extension module
In the config file /etc/fdfs/mod_fastdfs.conf, the parameter url_have_group_name says whether the URL contains the group name. It must be set correctly, otherwise files cannot be downloaded

##### Q16. Does FastDFS support modifying files?
File modification is supported since V3.08.

##### Q17. What if files with identical content should be stored only once?

Use FastDFS together with FastDHT, http://localhost:8181/docs/fastdfs/fastdfs-1dtfs5fe93h60

##### Q18. Anyone who knows the tracker IP and port can upload files through the api; is malicious uploading a problem

Access restrictions can be configured: add IP restrictions in tracker.conf and storage.conf, for example:
```bash
# allow_hosts can ocur more than once, host can be hostname or ip address,
# "*" means match all ip addresses, can use range like this: 10.0.1.[1-15,20] or
# host[01-08,20-25].domain.com, for example:
# allow_hosts=10.0.1.[1-15,20]
# allow_hosts=host[01-08,20-25].domain.com
#allow_hosts=*
allow_hosts=222.222.222.[152-154]
allow_hosts=111.111.111.111

```
##### Q19. What should be considered during deployment?

0. The tracker only manages the cluster topology and stores no file index, so its hardware requirements are low; for mutual backup, **two trackers are enough. In a small cluster they can share machines with the storages**

1. Set a suitable reserved space in the tracker's tracker.conf.

2. fastdfs stores files directly on the operating system's file system; **the storage's bottleneck usually shows up as disk IO**. To make full use of the file system cache and speed up file access, **give the storage plenty of memory**; with many hot files, heavy IO throughput also consumes CPU

3. For mutual backup, **two storages per group are enough**

4. To use the disks fully, do not build a RAID on the storage; mount single disks directly, each disk mounted on one path and used as one store_path of the storage.

5. **Storages in the same group must use the same port**; it is recommended that they mount the same number of store paths with the same sizes; one host can run several storages belonging to different groups.

6. When there are several trackers, every storage in a group should be configured with all tracker addresses

7. fastdfs removed the HTTP download feature in 4.0.5 and needs an external web server; to deal with file sync delay, apache or nginx must use the extension module provided by FastDFS, e.g. fastdfs-nginx-module for nginx
 - Deploy a web server on every storage server and serve HTTP directly
 - No web server is needed on the tracker server
 - Every group must have at least one nginx providing HTTP file access.

8. For massive numbers of small files, consider the trunk (merged) storage feature by setting use_trunk_file = true in tracker.conf; **if a group stores no more than about ten million files, this feature is not needed**.

9. To avoid unnecessary disturbance and for cluster safety, **use the storage server id mechanism**: set use_storage_id = true in tracker.conf and fill in the id, group name and ip of every storage in storage_ids.conf. This makes migration easy.
|
|
||||||
|
|
||||||
|
|
@ -33,8 +33,6 @@ RUN yum install git gcc gcc-c++ make automake autoconf libtool pcre pcre-devel z
|
||||||
&& make && make install \
|
&& make && make install \
|
||||||
&& chmod +x /home/fastdfs.sh
|
&& chmod +x /home/fastdfs.sh
|
||||||
|
|
||||||
RUN ln -s /usr/local/src/fastdfs/init.d/fdfs_trackerd /etc/init.d/fdfs_trackerd \
|
|
||||||
&& ln -s /usr/local/src/fastdfs/init.d/fdfs_storaged /etc/init.d/fdfs_storaged
|
|
||||||
|
|
||||||
# export config
|
# export config
|
||||||
VOLUME /etc/fdfs
|
VOLUME /etc/fdfs
|
||||||
@@ -11,84 +11,26 @@ ADD conf/nginx.conf /etc/fdfs/
ADD conf/mod_fastdfs.conf /etc/fdfs

# run
-# install packages
-RUN yum install git gcc gcc-c++ make automake autoconf libtool pcre pcre-devel zlib zlib-devel openssl-devel wget vim -y
-# git clone libfastcommon / libserverframe / fastdfs / fastdfs-nginx-module
-RUN cd /usr/local/src \
-&& git clone https://gitee.com/fastdfs100/libfastcommon.git \
-&& git clone https://gitee.com/fastdfs100/libserverframe.git \
-&& git clone https://gitee.com/fastdfs100/fastdfs.git \
-&& git clone https://gitee.com/fastdfs100/fastdfs-nginx-module.git \
-&& pwd && ls
-# build libfastcommon / libserverframe / fastdfs
-RUN mkdir /home/dfs \
-&& cd /usr/local/src \
-&& pwd && ls \
-&& cd libfastcommon/ \
-&& ./make.sh && ./make.sh install \
-&& cd ../ \
-&& cd libserverframe/ \
-&& ./make.sh && ./make.sh install \
-&& cd ../ \
-&& cd fastdfs/ \
-&& ./make.sh && ./make.sh install
+RUN yum install git gcc gcc-c++ make automake autoconf libtool pcre pcre-devel zlib zlib-devel openssl-devel wget vim -y \
+&& cd /usr/local/src \
+&& git clone https://github.com/happyfish100/libfastcommon.git --depth 1 \
+&& git clone https://github.com/happyfish100/fastdfs.git --depth 1 \
+&& git clone https://github.com/happyfish100/fastdfs-nginx-module.git --depth 1 \
+&& wget http://nginx.org/download/nginx-1.15.4.tar.gz \
+&& tar -zxvf nginx-1.15.4.tar.gz \
+&& mkdir /home/dfs \
+&& cd /usr/local/src/ \
+&& cd libfastcommon/ \
+&& ./make.sh && ./make.sh install \
+&& cd ../ \
+&& cd fastdfs/ \
+&& ./make.sh && ./make.sh install \
+&& cd ../ \
+&& cd nginx-1.15.4/ \
+&& ./configure --add-module=/usr/local/src/fastdfs-nginx-module/src/ \
+&& make && make install \
+&& chmod +x /home/fastdfs.sh
-# download nginx and build with fastdfs-nginx-module
-# recommended NGINX versions:
-# NGINX_VERSION=1.16.1
-# NGINX_VERSION=1.17.10
-# NGINX_VERSION=1.18.0
-# NGINX_VERSION=1.19.10
-# NGINX_VERSION=1.20.2
-# NGINX_VERSION=1.21.6
-# NGINX_VERSION=1.22.1
-# NGINX_VERSION=1.23.3
-# the nginx version to use can be specified on the docker build command line, for example:
-# docker build --build-arg NGINX_VERSION="1.16.1" -t happyfish100/fastdfs:latest -t happyfish100/fastdfs:6.09.01 .
-# docker build --build-arg NGINX_VERSION="1.19.10" -t happyfish100/fastdfs:latest -t happyfish100/fastdfs:6.09.02 .
-# docker build --build-arg NGINX_VERSION="1.23.3" -t happyfish100/fastdfs:latest -t happyfish100/fastdfs:6.09.03 .
-ARG NGINX_VERSION=1.16.1
-RUN cd /usr/local/src \
-&& NGINX_PACKAGE=nginx-${NGINX_VERSION} \
-&& NGINX_FILE=${NGINX_PACKAGE}.tar.gz \
-&& wget http://nginx.org/download/${NGINX_FILE} \
-&& tar -zxvf ${NGINX_FILE} \
-&& pwd && ls \
-&& cd /usr/local/src \
-&& cd ${NGINX_PACKAGE}/ \
-&& ./configure --add-module=/usr/local/src/fastdfs-nginx-module/src/ \
-&& make && make install \
-&& chmod +x /home/fastdfs.sh

-# the original RUN statement was too complex, which made it hard for docker build to reuse build caching across stages
-# RUN yum install git gcc gcc-c++ make automake autoconf libtool pcre pcre-devel zlib zlib-devel openssl-devel wget vim -y \
-# && NGINX_VERSION=1.19.9 \
-# && NGINX_PACKAGE=nginx-${NGINX_VERSION} \
-# && NGINX_FILE=${NGINX_PACKAGE}.tar.gz \
-# && cd /usr/local/src \
-# && git clone https://gitee.com/fastdfs100/libfastcommon.git \
-# && git clone https://gitee.com/fastdfs100/libserverframe.git \
-# && git clone https://gitee.com/fastdfs100/fastdfs.git \
-# && git clone https://gitee.com/fastdfs100/fastdfs-nginx-module.git \
-# && wget http://nginx.org/download/${NGINX_FILE} \
-# && tar -zxvf ${NGINX_FILE} \
-# && mkdir /home/dfs \
-# && cd /usr/local/src/ \
-# && cd libfastcommon/ \
-# && ./make.sh && ./make.sh install \
-# && cd ../ \
-# && cd libserverframe/ \
-# && ./make.sh && ./make.sh install \
-# && cd ../ \
-# && cd fastdfs/ \
-# && ./make.sh && ./make.sh install \
-# && cd ../ \
-# && cd ${NGINX_PACKAGE}/ \
-# && ./configure --add-module=/usr/local/src/fastdfs-nginx-module/src/ \
-# && make && make install \
-# && chmod +x /home/fastdfs.sh

-RUN ln -s /usr/local/src/fastdfs/init.d/fdfs_trackerd /etc/init.d/fdfs_trackerd \
-&& ln -s /usr/local/src/fastdfs/init.d/fdfs_storaged /etc/init.d/fdfs_storaged

# export config
VOLUME /etc/fdfs
fastdfs.spec
@@ -3,49 +3,43 @@
%define FDFSClient libfdfsclient
%define FDFSClientDevel libfdfsclient-devel
%define FDFSTool fastdfs-tool
-%define FDFSConfig fastdfs-config
+%define FDFSVersion 6.0.5
%define CommitVersion %(echo $COMMIT_VERSION)

Name: %{FastDFS}
-Version: 6.12.2
+Version: %{FDFSVersion}
Release: 1%{?dist}
Summary: FastDFS server and client
License: GPL
Group: Arch/Tech
-URL: https://github.com/happyfish100/fastdfs/
-Source: https://github.com/happyfish100/fastdfs/%{name}-%{version}.tar.gz
+URL: http://perso.orange.fr/sebastien.godard/
+Source: http://perso.orange.fr/sebastien.godard/%{name}-%{version}.tar.gz

BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
-BuildRequires: libserverframe-devel >= 1.2.5
Requires: %__cp %__mv %__chmod %__grep %__mkdir %__install %__id
-Requires: %{FDFSServer} = %{version}-%{release}
-Requires: %{FDFSTool} = %{version}-%{release}
+BuildRequires: libfastcommon-devel >= 1.0.43

%description
This package provides tracker & storage of fastdfs
commit version: %{CommitVersion}

%package -n %{FDFSServer}
-Requires: libserverframe >= 1.2.5
-Requires: %{FDFSConfig}
+Requires: libfastcommon >= 1.0.43
Summary: fastdfs tracker & storage

%package -n %{FDFSTool}
-Requires: %{FDFSClient}
+Requires: libfastcommon
Summary: fastdfs tools

%package -n %{FDFSClient}
-Requires: libserverframe >= 1.2.5
-Requires: %{FDFSConfig}
+Requires: libfastcommon
Summary: The client dynamic library of fastdfs

%package -n %{FDFSClient}-devel
Requires: %{FDFSClient}
Summary: The client header of fastdfs

-%package -n %{FDFSConfig}
-Summary: FastDFS config files for sample

%description -n %{FDFSServer}
This package provides tracker & storage of fastdfs
commit version: %{CommitVersion}
@@ -62,21 +56,31 @@ commit version: %{CommitVersion}
This package is tools for fastdfs
commit version: %{CommitVersion}

-%description -n %{FDFSConfig}
-FastDFS config files for sample
-commit version: %{CommitVersion}

%prep
%setup -q

%build
-./make.sh clean && ./make.sh
+# FIXME: I need to fix the upstream Makefile to use LIBDIR et al. properly and
+# send the upstream maintainer a patch.
+# add DOCDIR to the configure part
+./make.sh

%install
rm -rf %{buildroot}
DESTDIR=$RPM_BUILD_ROOT ./make.sh install
+#make install IGNORE_MAN_GROUP=y DOC_DIR=%{_docdir}/%{name}-%{version} INIT_DIR=%{_initrddir}

-%post
+#install -m 0644 sysstat.crond %{buildroot}/%{_sysconfdir}/cron.d/sysstat

+#%find_lang %{name}

+%post -n %{FDFSServer}
+/sbin/chkconfig --add fdfs_trackerd
+/sbin/chkconfig --add fdfs_storaged

+%preun -n %{FDFSServer}
+/sbin/chkconfig --del fdfs_trackerd
+/sbin/chkconfig --del fdfs_storaged

%postun
@@ -84,29 +88,31 @@ DESTDIR=$RPM_BUILD_ROOT ./make.sh install
#rm -rf %{buildroot}

%files
-%post -n %{FDFSServer}
-systemctl enable fdfs_trackerd
-systemctl enable fdfs_storaged
+#%defattr(-,root,root,-)
+#/usr/local/bin/*
+#/usr/local/include/*

%files -n %{FDFSServer}
%defattr(-,root,root,-)
/usr/bin/fdfs_trackerd
/usr/bin/fdfs_storaged
-%config(noreplace) /usr/lib/systemd/system/fdfs_trackerd.service
-%config(noreplace) /usr/lib/systemd/system/fdfs_storaged.service
+/usr/bin/restart.sh
+/usr/bin/stop.sh
+/etc/init.d/*
+/etc/fdfs/tracker.conf.sample
+/etc/fdfs/storage.conf.sample
+/etc/fdfs/storage_ids.conf.sample

%files -n %{FDFSClient}
-%defattr(-,root,root,-)
/usr/lib64/libfdfsclient*
/usr/lib/libfdfsclient*
+/etc/fdfs/client.conf.sample

%files -n %{FDFSClient}-devel
%defattr(-,root,root,-)
/usr/include/fastdfs/*

%files -n %{FDFSTool}
-%defattr(-,root,root,-)
/usr/bin/fdfs_monitor
/usr/bin/fdfs_test
/usr/bin/fdfs_test1
@@ -119,11 +125,6 @@ systemctl enable fdfs_storaged
/usr/bin/fdfs_appender_test1
/usr/bin/fdfs_append_file
/usr/bin/fdfs_upload_appender
-/usr/bin/fdfs_regenerate_filename

-%files -n %{FDFSConfig}
-%defattr(-,root,root,-)
-%config(noreplace) /etc/fdfs/*.conf

%changelog
* Mon Jun 23 2014 Zaixue Liao <liaozaixue@yongche.com>
make.sh
@@ -2,29 +2,23 @@ ENABLE_STATIC_LIB=0
ENABLE_SHARED_LIB=1
TARGET_PREFIX=$DESTDIR/usr
TARGET_CONF_PATH=$DESTDIR/etc/fdfs
-TARGET_SYSTEMD_PATH=$DESTDIR/usr/lib/systemd/system
+TARGET_INIT_PATH=$DESTDIR/etc/init.d

WITH_LINUX_SERVICE=1

-DEBUG_FLAG=0
+DEBUG_FLAG=1

-export CC=gcc
-CFLAGS='-Wall'
-GCC_VERSION=$(gcc -dM -E - < /dev/null | grep -w __GNUC__ | awk '{print $NF;}')
-if [ -n "$GCC_VERSION" ] && [ $GCC_VERSION -ge 7 ]; then
-CFLAGS="$CFLAGS -Wformat-truncation=0 -Wformat-overflow=0"
-fi
-CFLAGS="$CFLAGS -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE"
+CFLAGS='-Wall -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE'
if [ "$DEBUG_FLAG" = "1" ]; then
CFLAGS="$CFLAGS -g -O1 -DDEBUG_FLAG"
else
-CFLAGS="$CFLAGS -g -O3"
+CFLAGS="$CFLAGS -O3"
fi

if [ -f /usr/include/fastcommon/_os_define.h ]; then
-OS_BITS=$(grep -F OS_BITS /usr/include/fastcommon/_os_define.h | awk '{print $NF;}')
+OS_BITS=$(fgrep OS_BITS /usr/include/fastcommon/_os_define.h | awk '{print $NF;}')
elif [ -f /usr/local/include/fastcommon/_os_define.h ]; then
-OS_BITS=$(grep -F OS_BITS /usr/local/include/fastcommon/_os_define.h | awk '{print $NF;}')
+OS_BITS=$(fgrep OS_BITS /usr/local/include/fastcommon/_os_define.h | awk '{print $NF;}')
else
OS_BITS=64
fi
@@ -32,15 +26,7 @@ fi
uname=$(uname)

if [ "$OS_BITS" -eq 64 ]; then
-if [ $uname = 'Linux' ]; then
-osname=$(cat /etc/os-release | grep -w NAME | awk -F '=' '{print $2;}' | \
-awk -F '"' '{if (NF==3) {print $2} else {print $1}}' | awk '{print $1}')
-if [ $osname = 'Ubuntu' -o $osname = 'Debian' ]; then
-LIB_VERSION=lib
-else
-LIB_VERSION=lib64
-fi
-elif [ "$uname" = "Darwin" ]; then
+if [ "$uname" = "Darwin" ]; then
LIB_VERSION=lib
else
LIB_VERSION=lib64
@@ -113,7 +99,7 @@ elif [ "$uname" = "FreeBSD" ]; then
fi

if [ $have_pthread -eq 0 ] && [ "$uname" != "Darwin" ]; then
-/sbin/ldconfig -p | grep -F libpthread.so > /dev/null
+/sbin/ldconfig -p | fgrep libpthread.so > /dev/null
if [ $? -eq 0 ]; then
LIBS="$LIBS -lpthread"
else
@@ -168,35 +154,28 @@ perl -pi -e "s#\\\$\(LIBS\)#$LIBS#g" Makefile
perl -pi -e "s#\\\$\(TARGET_PREFIX\)#$TARGET_PREFIX#g" Makefile
cd ..

-copy_file()
-{
-src=$1
-dest=$2
-
-if [ ! -f $TARGET_CONF_PATH/tracker.conf ]; then
-cp -f conf/tracker.conf $TARGET_CONF_PATH/tracker.conf
-fi
-}

if [ "$1" = "install" ]; then
cd ..
+cp -f restart.sh $TARGET_PREFIX/bin
+cp -f stop.sh $TARGET_PREFIX/bin

if [ "$uname" = "Linux" ]; then
if [ "$WITH_LINUX_SERVICE" = "1" ]; then
-if [ ! -d $TARGET_CONF_PATH ]; then
-mkdir -p $TARGET_CONF_PATH
-cp -f conf/tracker.conf $TARGET_CONF_PATH/tracker.conf
-cp -f conf/storage.conf $TARGET_CONF_PATH/storage.conf
-cp -f conf/client.conf $TARGET_CONF_PATH/client.conf
-cp -f conf/storage_ids.conf $TARGET_CONF_PATH/storage_ids.conf
-cp -f conf/http.conf $TARGET_CONF_PATH/http.conf
-cp -f conf/mime.types $TARGET_CONF_PATH/mime.types
-fi
-
-if [ ! -f $TARGET_SYSTEMD_PATH/fdfs_trackerd.service ]; then
-mkdir -p $TARGET_SYSTEMD_PATH
-cp -f systemd/fdfs_trackerd.service $TARGET_SYSTEMD_PATH
-cp -f systemd/fdfs_storaged.service $TARGET_SYSTEMD_PATH
+if [ ! -d /etc/fdfs ]; then
+mkdir -p /etc/fdfs
+cp -f conf/tracker.conf $TARGET_CONF_PATH/tracker.conf.sample
+cp -f conf/storage.conf $TARGET_CONF_PATH/storage.conf.sample
+cp -f conf/client.conf $TARGET_CONF_PATH/client.conf.sample
+cp -f conf/storage_ids.conf $TARGET_CONF_PATH/storage_ids.conf.sample
+cp -f conf/http.conf $TARGET_CONF_PATH/http.conf.sample
+cp -f conf/mime.types $TARGET_CONF_PATH/mime.types.sample
fi
+mkdir -p $TARGET_INIT_PATH
+cp -f init.d/fdfs_trackerd $TARGET_INIT_PATH
+cp -f init.d/fdfs_storaged $TARGET_INIT_PATH
+# /sbin/chkconfig --add fdfs_trackerd
+# /sbin/chkconfig --add fdfs_storaged
fi
fi
fi
File diff suppressed because it is too large
@@ -0,0 +1,96 @@
+#!/bin/sh
+
+if [ -z "$1" ]; then
+/bin/echo "$0 <command line>"
+exit 1
+fi
+
+if [ -f /bin/awk ]; then
+AWK=/bin/awk
+else
+AWK=/usr/bin/awk
+fi
+
+if [ -f /bin/grep ]; then
+GREP=/bin/grep
+else
+GREP=/usr/bin/grep
+fi
+
+if [ -f /bin/expr ]; then
+EXPR=/bin/expr
+else
+EXPR=/usr/bin/expr
+fi
+
+if [ -f /bin/sed ]; then
+SED=/bin/sed
+else
+SED=/usr/bin/sed
+fi
+
+program=`/bin/echo $1 | $AWK -F '/' '{print $NF;}'`
+param=''
+grep_cmd="$GREP -w $program"
+
+list='2 3 4 5 6 7 8 9'
+for i in $list; do
+eval p='$'$i
+if [ -z "$p" ]; then
+break
+fi
+param="$param $p"
+#first_ch=`$EXPR substr "$p" 1 1`
+first_ch=`/bin/echo "$p" | $SED -e 's/\(.\).*/\1/'`
+if [ "$first_ch" = "-" ]; then
+p="'\\$p'"
+fi
+grep_cmd="$grep_cmd | $GREP -w $p"
+done
+
+cmd="/bin/ps auxww | $grep_cmd | $GREP -v grep | $GREP -v $0 | $AWK '{print \$2;}'"
+pids=`/bin/sh -c "$cmd"`
+if [ ! -z "$pids" ]; then
+i=0
+count=0
+/bin/echo "stopping $program ..."
+while [ 1 -eq 1 ]; do
+new_pids=''
+for pid in $pids; do
+if [ $i -eq 0 ]; then
+/bin/kill $pid
+else
+/bin/kill $pid >/dev/null 2>&1
+fi
+
+if [ $? -eq 0 ]; then
+new_pids="$new_pids $pid"
+fi
+count=`$EXPR $count + 1`
+done
+
+if [ -z "$new_pids" ]; then
+break
+fi
+
+pids="$new_pids"
+/usr/bin/printf .
+/bin/sleep 1
+i=`$EXPR $i + 1`
+done
+fi
+
+/bin/echo ""
+cmd="/bin/ps auxww | $grep_cmd | $GREP -v grep | $GREP -v $0 | /usr/bin/wc -l"
+count=`/bin/sh -c "$cmd"`
+if [ $count -eq 0 ]; then
+/bin/echo "starting $program ..."
+exec $1 $param
+exit $?
+else
+cmd="/bin/ps auxww | $grep_cmd | $GREP -v grep | $GREP -v $0"
+/bin/sh -c "$cmd"
+/bin/echo "already running $program count: $count, restart aborted!"
+exit 16
+fi
+
Some files were not shown because too many files have changed in this diff