~aleteoryx/muditaos

bb2df2a32ccb9e1cfbc11161ec7dfb2b044a7b0e — Bartosz 3 years ago bb0ef54
[MOS-803] Fixes

Fixes for deviceInfo endpoint, scripts, and missing migration files in sysroot
M module-services/service-desktop/endpoints/deviceInfo/DeviceInfoEndpointCommon.cpp +3 -15
@@ -3,19 +3,10 @@

#include <endpoints/deviceInfo/DeviceInfoEndpointCommon.hpp>
#include <endpoints/message/Sender.hpp>
#include <endpoints/JsonKeyNames.hpp>

#include <EventStore.hpp>
#include <product/version.hpp>
#include <service-desktop/ServiceDesktop.hpp>

#include <cstdint>
#include <string>
#include <vector>
#include <sys/statvfs.h>
#include <purefs/filesystem_paths.hpp>

#include <ctime>

namespace sdesktop::endpoints
{


@@ -37,8 +28,7 @@ namespace sdesktop::endpoints
    auto DeviceInfoEndpointCommon::handleGet(Context &context) -> http::Code
    {
        const auto &requestBody = context.getBody();

-        if (requestBody[json::fileList].is_number()) {
+        if (not requestBody.object_items().empty() and requestBody[json::fileList].is_number()) {

            const auto diagFileType = parseDiagnosticFileType(requestBody[json::fileList]);



@@ -47,12 +37,10 @@ namespace sdesktop::endpoints

                return http::Code::BadRequest;
            }

            return gatherListOfDiagnostics(context, diagFileType);
        }
-        else {
-            return getDeviceInfo(context);
-        }

+        return getDeviceInfo(context);
    }

    auto DeviceInfoEndpointCommon::parseDiagnosticFileType(const json11::Json &fileList) -> DiagnosticFileType
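
For reference, a minimal standalone sketch (not part of the commit) of the routing rule the patched condition implements: a request body that is a non-empty JSON object with a numeric fileList field selects the diagnostics listing, anything else falls through to the plain device-info response. The literal key name "fileList" and the toy Route type are assumptions for illustration; the endpoint itself reads the key from JsonKeyNames.hpp.

// Sketch only: mirrors the patched routing guard with bare json11, outside the
// endpoint framework. The "fileList" key name is assumed here.
#include <json11.hpp>

#include <iostream>
#include <string>

namespace
{
    const std::string fileListKey = "fileList"; // assumed value of json::fileList

    enum class Route
    {
        DiagnosticsList,
        DeviceInfo
    };

    auto routeRequest(const json11::Json &requestBody) -> Route
    {
        // Same shape as the new condition: only a non-empty object whose
        // "fileList" member is numeric selects the diagnostics listing.
        if (not requestBody.object_items().empty() and requestBody[fileListKey].is_number()) {
            return Route::DiagnosticsList;
        }
        return Route::DeviceInfo;
    }
} // namespace

int main()
{
    std::string err;
    const auto diagRequest  = json11::Json::parse(R"({"fileList": 1})", err);
    const auto plainRequest = json11::Json::parse("{}", err);

    std::cout << (routeRequest(diagRequest) == Route::DiagnosticsList ? "diagnostics" : "deviceInfo") << '\n';
    std::cout << (routeRequest(plainRequest) == Route::DiagnosticsList ? "diagnostics" : "deviceInfo") << '\n';
}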

M products/BellHybrid/services/desktop/endpoints/deviceInfo/DeviceInfoEndpoint.cpp +4 -4
@@ -42,10 +42,10 @@ namespace sdesktop::endpoints
             {json::currentRTCTime, std::to_string(static_cast<uint32_t>(std::time(nullptr)))},
             {json::version, std::string(VERSION)},
             {json::serialNumber, getSerialNumber()},
-             {json::recoveryStatusFilePath, purefs::dir::getTemporaryPath() / recoveryStatusFilename},
-             {json::updateFilePath, purefs::dir::getTemporaryPath() / updateFilename},
-             {json::backupFilePath, purefs::dir::getTemporaryPath() / backupFilename},
-             {json::syncFilePath, purefs::dir::getTemporaryPath() / syncFilename}}));
+             {json::recoveryStatusFilePath, (purefs::dir::getTemporaryPath() / recoveryStatusFilename).string()},
+             {json::updateFilePath, (purefs::dir::getTemporaryPath() / updateFilename).string()},
+             {json::backupFilePath, (purefs::dir::getTemporaryPath() / backupFilename).string()},
+             {json::syncFilePath, (purefs::dir::getTemporaryPath() / syncFilename).string()}}));

        return http::Code::OK;
    }
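
The four path fields are now converted to std::string explicitly before being handed to json11. std::filesystem::path is not one of json11's native value types, and since a path is itself an iterable container, json11's templated constructors can end up treating it as something other than a single string; calling .string() up front keeps the fields serializing as plain JSON strings. A minimal sketch of the pattern with placeholder key and path values (the PurePhone endpoint below gets the same treatment):

// Sketch only: building a json11 object from std::filesystem::path values.
// The key and the paths are placeholders, not the endpoint's real ones.
#include <json11.hpp>

#include <filesystem>
#include <iostream>
#include <string>

int main()
{
    const std::filesystem::path temporaryPath = "/tmp";       // placeholder for purefs::dir::getTemporaryPath()
    const std::string updateFilename          = "update.tar"; // placeholder

    const json11::Json body(json11::Json::object{
        {"updateFilePath", (temporaryPath / updateFilename).string()}
    });

    std::cout << body.dump() << '\n'; // {"updateFilePath": "/tmp/update.tar"}
}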

M products/PurePhone/services/desktop/endpoints/deviceInfo/DeviceInfoEndpoint.cpp +4 -4
@@ -57,10 +57,10 @@ namespace sdesktop::endpoints
             {json::version, std::string(VERSION)},
             {json::serialNumber, getSerialNumber()},
             {json::caseColour, getCaseColour()},
-             {json::recoveryStatusFilePath, purefs::dir::getTemporaryPath() / recoveryStatusFilename},
-             {json::updateFilePath, purefs::dir::getTemporaryPath() / updateFilename},
-             {json::backupFilePath, purefs::dir::getTemporaryPath() / backupFilename},
-             {json::syncFilePath, purefs::dir::getTemporaryPath() / syncFilename},
+             {json::recoveryStatusFilePath, (purefs::dir::getTemporaryPath() / recoveryStatusFilename).string()},
+             {json::updateFilePath, (purefs::dir::getTemporaryPath() / updateFilename).string()},
+             {json::backupFilePath, (purefs::dir::getTemporaryPath() / backupFilename).string()},
+             {json::syncFilePath, (purefs::dir::getTemporaryPath() / syncFilename).string()},
             {json::deviceToken, getDeviceToken()}}));

        return http::Code::OK;

M scripts/lua/backup.lua +1 -1
@@ -16,7 +16,7 @@ backup.img_failure = "assets/gui_image_backup_failed.bin"
local function check_available_space()
    local db_size = helpers.dir_size_filtered(paths.db_dir, match_db_files)
    local version_size = lfs.attributes(paths.version_file, 'size')
-    local available_space = recovery.sys.free_space(paths.user_dir)
+    local available_space = recovery.sys.free_space(recovery.sys.user())
    -- Multiply the result by two due to the internal padding inside tar
    local required_space = (db_size + version_size) * 2


M scripts/lua/restore.lua +1 -1
@@ -18,7 +18,7 @@ restore.img_failure = "assets/gui_image_restore_failed.bin"

local function check_available_space()
    local backup_size = lfs.attributes(paths.backup_file, 'size')
-    local available_space = recovery.sys.free_space(paths.user_dir)
+    local available_space = recovery.sys.free_space(recovery.sys.user())

    print(string.format("Checking disk space:\nNeeded space: %d bytes, available space: %d bytes", backup_size,
        available_space))
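
Both scripts previously measured free space at paths.user_dir; they now pass recovery.sys.user() (presumably the user partition path exposed by the recovery environment) to free_space. The guard around the call is unchanged: take the payload size, double it to allow for tar's internal padding, and compare against the available space. A rough standalone sketch of that guard, written here in C++17 with std::filesystem because the recovery Lua API is only available on the device:

// Sketch only: the free-space check from backup.lua/restore.lua, restated with
// std::filesystem. Sizes and paths are placeholders.
#include <cstdint>
#include <filesystem>
#include <iostream>
#include <system_error>

namespace fs = std::filesystem;

// True when the volume holding targetDir can fit requiredSpace bytes.
bool hasEnoughSpace(const fs::path &targetDir, std::uintmax_t requiredSpace)
{
    std::error_code ec;
    const auto info = fs::space(targetDir, ec);
    if (ec) {
        std::cerr << "space query failed: " << ec.message() << '\n';
        return false;
    }
    std::cout << "Needed space: " << requiredSpace << " bytes, available space: " << info.available << " bytes\n";
    return info.available >= requiredSpace;
}

int main()
{
    // Placeholder payload size; the scripts derive it from the databases (backup)
    // or the backup archive (restore), then multiply by two for tar padding.
    const std::uintmax_t payloadSize = 4 * 1024 * 1024;
    return hasEnoughSpace("/tmp", payloadSize * 2) ? 0 : 1;
}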

M tools/init_databases.py +3 -1
@@ -9,6 +9,7 @@ import argparse
import logging
import sys
import json
+import shutil

log = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s [%(levelname)s]: %(message)s', level=logging.INFO)


@@ -29,7 +30,7 @@ def migrate_database_up(database: str, migration_path: os.path, dst_directory: o
    try:
        connection = sqlite3.connect(dst_db_path)
        log.info(f"\nPerforming up-migration of {database} to {dst_version}")
-        for i in range(dst_version+1):
+        for i in range(dst_version + 1):
            migration_script = os.path.join(migration_path, *[database, str(i), "up.sql"])
            devel_script = os.path.join(migration_path, *[database, str(i), "devel.sql"])
            with open(migration_script) as ms:


@@ -115,6 +116,7 @@ def main() -> int:
    for database_path in [args.common_path, args.product_path]:
        migration_path = os.path.join(database_path, migration_folder_name)
        ret |= migrate_database_wrapper(migration_path, json_data, args.output_path, args.development)
+        shutil.copytree(migration_path, os.path.join(args.output_path, migration_folder_name), dirs_exist_ok=True)

    return ret
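
The added shutil.copytree(..., dirs_exist_ok=True) call stages each migration folder into the output directory next to the freshly built databases, which is the "missing migration files in sysroot" part of the commit message. For illustration only, roughly the same staging step expressed with C++17's std::filesystem (the tool itself stays in Python; the paths below are placeholders):

// Sketch only: recursive copy of a migration tree into an output sysroot,
// tolerating an existing destination and overwriting files, akin to
// shutil.copytree(..., dirs_exist_ok=True).
#include <filesystem>
#include <iostream>
#include <system_error>

namespace fs = std::filesystem;

int main()
{
    const fs::path migrationPath = "db/migration";         // placeholder source tree
    const fs::path outputPath    = "sysroot/db/migration"; // placeholder destination

    std::error_code ec;
    fs::copy(migrationPath, outputPath,
             fs::copy_options::recursive | fs::copy_options::overwrite_existing, ec);

    if (ec) {
        std::cerr << "copy failed: " << ec.message() << '\n';
        return 1;
    }
    return 0;
}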